8204552: NMT: Separate thread stack tracking from virtual memory tracking
author zgu
Tue, 26 Mar 2019 15:50:34 -0400
changeset 54292 c31faeacf00a
parent 54291 e6c86f6012bf
child 54293 dae71635cc3a
8204552: NMT: Separate thread stack tracking from virtual memory tracking Reviewed-by: stuefe, minqi
make/hotspot/lib/JvmFeatures.gmk
src/hotspot/share/services/memBaseline.cpp
src/hotspot/share/services/memReporter.cpp
src/hotspot/share/services/memTracker.cpp
src/hotspot/share/services/memTracker.hpp
src/hotspot/share/services/threadStackTracker.cpp
src/hotspot/share/services/threadStackTracker.hpp
src/hotspot/share/services/virtualMemoryTracker.cpp
--- a/make/hotspot/lib/JvmFeatures.gmk	Tue Mar 26 15:27:41 2019 -0400
+++ b/make/hotspot/lib/JvmFeatures.gmk	Tue Mar 26 15:50:34 2019 -0400
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,7 @@
   JVM_CFLAGS_FEATURES += -DINCLUDE_NMT=0
   JVM_EXCLUDE_FILES += \
       memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
-      memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
+      memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp threadStackTracker.cpp
 endif
 
 ifneq ($(call check-jvm-feature, aot), true)
--- a/src/hotspot/share/services/memBaseline.cpp	Tue Mar 26 15:27:41 2019 -0400
+++ b/src/hotspot/share/services/memBaseline.cpp	Tue Mar 26 15:50:34 2019 -0400
@@ -156,6 +156,11 @@
     return false;
   }
 
+  // Walk simple thread stacks
+  if (!ThreadStackTracker::walk_simple_thread_stack_site(&malloc_walker)) {
+    return false;
+  }
+
   _malloc_sites.move(malloc_walker.malloc_sites());
   // The malloc sites are collected in size order
   _malloc_sites_order = by_size;
--- a/src/hotspot/share/services/memReporter.cpp	Tue Mar 26 15:27:41 2019 -0400
+++ b/src/hotspot/share/services/memReporter.cpp	Tue Mar 26 15:50:34 2019 -0400
@@ -26,6 +26,7 @@
 #include "memory/allocation.hpp"
 #include "services/mallocTracker.hpp"
 #include "services/memReporter.hpp"
+#include "services/threadStackTracker.hpp"
 #include "services/virtualMemoryTracker.hpp"
 #include "utilities/globalDefinitions.hpp"
 
@@ -46,11 +47,13 @@
 void MemReporterBase::print_malloc(size_t amount, size_t count, MEMFLAGS flag) const {
   const char* scale = current_scale();
   outputStream* out = output();
+  const char* alloc_type = (flag == mtThreadStack) ? "" : "malloc=";
+
   if (flag != mtNone) {
-    out->print("(malloc=" SIZE_FORMAT "%s type=%s",
+    out->print("(%s" SIZE_FORMAT "%s type=%s", alloc_type,
       amount_in_current_scale(amount), scale, NMTUtil::flag_to_name(flag));
   } else {
-    out->print("(malloc=" SIZE_FORMAT "%s",
+    out->print("(%s" SIZE_FORMAT "%s", alloc_type,
       amount_in_current_scale(amount), scale);
   }
 
@@ -126,10 +129,17 @@
 
   // Count thread's native stack in "Thread" category
   if (flag == mtThread) {
-    const VirtualMemory* thread_stack_usage =
-      (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
-    reserved_amount  += thread_stack_usage->reserved();
-    committed_amount += thread_stack_usage->committed();
+    if (ThreadStackTracker::track_as_vm()) {
+      const VirtualMemory* thread_stack_usage =
+        (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
+      reserved_amount  += thread_stack_usage->reserved();
+      committed_amount += thread_stack_usage->committed();
+    } else {
+      const MallocMemory* thread_stack_usage =
+        (const MallocMemory*)_malloc_snapshot->by_type(mtThreadStack);
+      reserved_amount += thread_stack_usage->malloc_size();
+      committed_amount += thread_stack_usage->malloc_size();
+    }
   } else if (flag == mtNMT) {
     // Count malloc headers in "NMT" category
     reserved_amount  += _malloc_snapshot->malloc_overhead()->size();
@@ -150,12 +160,22 @@
       out->print_cr("%27s (  instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
         " ", _instance_class_count, _array_class_count);
     } else if (flag == mtThread) {
-      // report thread count
-      out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", _malloc_snapshot->thread_count());
-      const VirtualMemory* thread_stack_usage =
-       _vm_snapshot->by_type(mtThreadStack);
-      out->print("%27s (stack: ", " ");
-      print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
+      if (ThreadStackTracker::track_as_vm()) {
+        const VirtualMemory* thread_stack_usage =
+         _vm_snapshot->by_type(mtThreadStack);
+        // report thread count
+        out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", ThreadStackTracker::thread_count());
+        out->print("%27s (stack: ", " ");
+        print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
+      } else {
+        MallocMemory* thread_stack_memory = _malloc_snapshot->by_type(mtThreadStack);
+        const char* scale = current_scale();
+        // report thread count
+        assert(ThreadStackTracker::thread_count() == 0, "Not used");
+        out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", thread_stack_memory->malloc_count());
+        out->print("%27s (Stack: " SIZE_FORMAT "%s", " ",
+          amount_in_current_scale(thread_stack_memory->malloc_size()), scale);
+      }
       out->print_cr(")");
     }
 
@@ -368,10 +388,11 @@
     size_t early_amount, size_t early_count, MEMFLAGS flags) const {
   const char* scale = current_scale();
   outputStream* out = output();
+  const char* alloc_type = (flags == mtThread) ? "" : "malloc=";
 
-  out->print("malloc=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
-  // Report type only if it is valid
-  if (flags != mtNone) {
+  out->print("%s" SIZE_FORMAT "%s", alloc_type, amount_in_current_scale(current_amount), scale);
+  // Report type only if it is valid and not under "thread" category
+  if (flags != mtNone && flags != mtThread) {
     out->print(" type=%s", NMTUtil::flag_to_name(flags));
   }
 
@@ -497,15 +518,25 @@
       }
       out->print_cr(")");
 
-      // report thread stack
-      const VirtualMemory* current_thread_stack =
+      out->print("%27s (stack: ", " ");
+      if (ThreadStackTracker::track_as_vm()) {
+        // report thread stack
+        const VirtualMemory* current_thread_stack =
           _current_baseline.virtual_memory(mtThreadStack);
-      const VirtualMemory* early_thread_stack =
-        _early_baseline.virtual_memory(mtThreadStack);
+        const VirtualMemory* early_thread_stack =
+          _early_baseline.virtual_memory(mtThreadStack);
 
-      out->print("%27s (stack: ", " ");
-      print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
-        early_thread_stack->reserved(), early_thread_stack->committed());
+        print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
+          early_thread_stack->reserved(), early_thread_stack->committed());
+      } else {
+        const MallocMemory* current_thread_stack =
+          _current_baseline.malloc_memory(mtThreadStack);
+        const MallocMemory* early_thread_stack =
+          _early_baseline.malloc_memory(mtThreadStack);
+
+        print_malloc_diff(current_thread_stack->malloc_size(), current_thread_stack->malloc_count(),
+          early_thread_stack->malloc_size(), early_thread_stack->malloc_count(), flag);
+      }
       out->print_cr(")");
     }
 
--- a/src/hotspot/share/services/memTracker.cpp	Tue Mar 26 15:27:41 2019 -0400
+++ b/src/hotspot/share/services/memTracker.cpp	Tue Mar 26 15:50:34 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 #include "services/memReporter.hpp"
 #include "services/mallocTracker.inline.hpp"
 #include "services/memTracker.hpp"
+#include "services/threadStackTracker.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/vmError.hpp"
@@ -92,7 +93,8 @@
 void MemTracker::init() {
   NMT_TrackingLevel level = tracking_level();
   if (level >= NMT_summary) {
-    if (!VirtualMemoryTracker::late_initialize(level)) {
+    if (!VirtualMemoryTracker::late_initialize(level) ||
+        !ThreadStackTracker::late_initialize(level)) {
       shutdown();
       return;
     }
@@ -164,6 +166,7 @@
     OrderAccess::fence();
     VirtualMemoryTracker::transition(current_level, level);
     MallocTracker::transition(current_level, level);
+    ThreadStackTracker::transition(current_level, level);
   } else {
     // Upgrading tracking level is not supported and has never been supported.
     // Allocating and deallocating malloc tracking structures is not thread safe and
--- a/src/hotspot/share/services/memTracker.hpp	Tue Mar 26 15:27:41 2019 -0400
+++ b/src/hotspot/share/services/memTracker.hpp	Tue Mar 26 15:50:34 2019 -0400
@@ -82,6 +82,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/threadCritical.hpp"
 #include "services/mallocTracker.hpp"
+#include "services/threadStackTracker.hpp"
 #include "services/virtualMemoryTracker.hpp"
 
 extern volatile bool NMT_stack_walkable;
@@ -241,31 +242,19 @@
     }
   }
 
-#ifdef _AIX
-  // See JDK-8202772 - temporarily disable thread stack tracking on AIX.
-  static inline void record_thread_stack(void* addr, size_t size) {}
-  static inline void release_thread_stack(void* addr, size_t size) {}
-#else
-  static inline void record_thread_stack(void* addr, size_t size) {
+  static void record_thread_stack(void* addr, size_t size) {
     if (tracking_level() < NMT_summary) return;
     if (addr != NULL) {
-      // uses thread stack malloc slot for book keeping number of threads
-      MallocMemorySummary::record_malloc(0, mtThreadStack);
-      record_virtual_memory_reserve(addr, size, CALLER_PC, mtThreadStack);
+      ThreadStackTracker::new_thread_stack((address)addr, size, CALLER_PC);
     }
   }
 
   static inline void release_thread_stack(void* addr, size_t size) {
     if (tracking_level() < NMT_summary) return;
     if (addr != NULL) {
-      // uses thread stack malloc slot for book keeping number of threads
-      MallocMemorySummary::record_free(0, mtThreadStack);
-      ThreadCritical tc;
-      if (tracking_level() < NMT_summary) return;
-      VirtualMemoryTracker::remove_released_region((address)addr, size);
+      ThreadStackTracker::delete_thread_stack((address)addr, size);
     }
   }
-#endif
 
   // Query lock is used to synchronize the access to tracking data.
   // So far, it is only used by JCmd query, but it may be used by
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/services/threadStackTracker.cpp	Tue Mar 26 15:50:34 2019 -0400
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "runtime/atomic.hpp"
+#include "runtime/threadCritical.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/memTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "services/threadStackTracker.hpp"
+
+volatile size_t ThreadStackTracker::_thread_count = 0;
+SortedLinkedList<SimpleThreadStackSite, ThreadStackTracker::compare_thread_stack_base>* ThreadStackTracker::_simple_thread_stacks = NULL;
+
+bool ThreadStackTracker::late_initialize(NMT_TrackingLevel level) {
+  if (level == NMT_detail && !track_as_vm()) {
+    _simple_thread_stacks = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
+      SortedLinkedList<SimpleThreadStackSite, ThreadStackTracker::compare_thread_stack_base>();
+    return (_simple_thread_stacks != NULL);
+  }
+  return true;
+}
+
+bool ThreadStackTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
+  assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
+  if (to == NMT_minimal) {
+    assert(from == NMT_summary || from == NMT_detail, "Just check");
+    ThreadCritical tc;
+    if (_simple_thread_stacks != NULL) {
+      delete _simple_thread_stacks;
+      _simple_thread_stacks = NULL;
+    }
+  }
+  return true;
+}
+
+int ThreadStackTracker::compare_thread_stack_base(const SimpleThreadStackSite& s1, const SimpleThreadStackSite& s2) {
+  return s1.base() - s2.base();
+}
+
+void ThreadStackTracker::new_thread_stack(void* base, size_t size, const NativeCallStack& stack) {
+  assert(MemTracker::tracking_level() >= NMT_summary, "Must be");
+  assert(base != NULL, "Should have been filtered");
+  if (track_as_vm()) {
+    ThreadCritical tc;
+    VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);
+    _thread_count ++;
+  } else {
+    // Use a slot in mallocMemorySummary for thread stack bookkeeping
+    MallocMemorySummary::record_malloc(size, mtThreadStack);
+    if (MemTracker::tracking_level() == NMT_detail) {
+      ThreadCritical tc;
+      assert(_simple_thread_stacks != NULL, "Must be initialized");
+      SimpleThreadStackSite site((address)base, size, stack);
+      _simple_thread_stacks->add(site);
+    }
+  }
+}
+
+void ThreadStackTracker::delete_thread_stack(void* base, size_t size) {
+  assert(MemTracker::tracking_level() >= NMT_summary, "Must be");
+  assert(base != NULL, "Should have been filtered");
+  if(track_as_vm()) {
+    ThreadCritical tc;
+    VirtualMemoryTracker::remove_released_region((address)base, size);
+    _thread_count--;
+  } else {
+    // Use a slot in mallocMemorySummary for thread stack bookkeeping
+    MallocMemorySummary::record_free(size, mtThreadStack);
+    if (MemTracker::tracking_level() == NMT_detail) {
+      ThreadCritical tc;
+      assert(_simple_thread_stacks != NULL, "Must be initialized");
+      SimpleThreadStackSite site((address)base, size);
+      bool removed = _simple_thread_stacks->remove(site);
+      assert(removed, "Must exist");
+    }
+  }
+}
+
+bool ThreadStackTracker::walk_simple_thread_stack_site(MallocSiteWalker* walker) {
+  if (!track_as_vm()) {
+    LinkedListImpl<MallocSite> _sites;
+    {
+      ThreadCritical tc;
+      assert(_simple_thread_stacks != NULL, "Must be initialized");
+      LinkedListIterator<SimpleThreadStackSite> itr(_simple_thread_stacks->head());
+      const SimpleThreadStackSite* ts = itr.next();
+      // Consolidate sites and convert to MallocSites, so we can piggyback into
+      // malloc snapshot
+      while (ts != NULL) {
+        MallocSite site(*ts->call_stack(), mtThreadStack);
+        MallocSite* exist = _sites.find(site);
+        if (exist != NULL) {
+          exist->allocate(ts->size());
+        } else {
+          site.allocate(ts->size());
+          _sites.add(site);
+        }
+        ts = itr.next();
+      }
+    }
+
+    // Piggyback to malloc snapshot
+    LinkedListIterator<MallocSite> site_itr(_sites.head());
+    const MallocSite* s = site_itr.next();
+    while (s != NULL) {
+      walker->do_malloc_site(s);
+      s = site_itr.next();
+    }
+  }
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/services/threadStackTracker.hpp	Tue Mar 26 15:50:34 2019 -0400
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_SERVICES_THREADSTACKTRACKER_HPP
+#define SHARE_SERVICES_THREADSTACKTRACKER_HPP
+
+#if INCLUDE_NMT
+
+#include "services/allocationSite.hpp"
+#include "services/mallocSiteTable.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/nativeCallStack.hpp"
+#include "utilities/linkedlist.hpp"
+
+class SimpleThreadStackSite;
+
+class SimpleThreadStack {
+  friend class SimpleThreadStackSite;
+private:
+  address _base;
+  size_t  _size;
+public:
+  SimpleThreadStack() : _base(NULL), _size(0) { }
+  bool equals(const SimpleThreadStack& s) const {
+    return base() == s.base();
+  }
+
+  size_t  size() const { return _size; }
+  address base() const { return _base; }
+private:
+  void set_size(size_t size)  { _size = size; }
+  void set_base(address base) { _base = base; }
+};
+
+class SimpleThreadStackSite : public AllocationSite<SimpleThreadStack> {
+public:
+  SimpleThreadStackSite(address base, size_t size, const NativeCallStack& stack) :
+    AllocationSite<SimpleThreadStack>(stack, mtThreadStack) {
+    data()->set_size(size);
+    data()->set_base(base);
+  }
+
+  SimpleThreadStackSite(address base, size_t size) :
+    AllocationSite<SimpleThreadStack>(NativeCallStack::empty_stack(), mtThreadStack) {
+    data()->set_base(base);
+    data()->set_size(size);
+  }
+
+  bool equals(const SimpleThreadStackSite& mts) const {
+    bool eq = base() == mts.base();
+    assert(!eq || size() == mts.size(), "Must match");
+    return eq;
+  }
+
+  size_t  size() const { return peek()->size(); }
+  address base() const { return peek()->base(); }
+};
+
+  /*
+   * Most platforms that HotSpot supports have their thread stacks backed by
+   * virtual memory by default. In these cases, the thread stack tracker simply
+   * delegates tracking to the virtual memory tracker.
+   * However, there are exceptions (e.g. AIX) where a platform can provide stacks
+   * that are not page aligned, and a hypothetical VM implementation could provide
+   * its own stacks. In those cases, track_as_vm() should return false and this
+   * tracker manages stack tracking internally.
+   * During a memory snapshot, the tracked thread stack data is walked and stored
+   * alongside malloc'd data in the baseline. The regions are not scanned and are
+   * assumed fully committed for now; a scanning phase can be added if needed.
+   */
+class ThreadStackTracker : AllStatic {
+private:
+  static volatile size_t _thread_count;
+
+  static int compare_thread_stack_base(const SimpleThreadStackSite& s1, const SimpleThreadStackSite& s2);
+  static SortedLinkedList<SimpleThreadStackSite, compare_thread_stack_base>* _simple_thread_stacks;
+public:
+  // Late phase initialization
+  static bool late_initialize(NMT_TrackingLevel level);
+  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
+
+  static void new_thread_stack(void* base, size_t size, const NativeCallStack& stack);
+  static void delete_thread_stack(void* base, size_t size);
+
+  static bool   track_as_vm()  { return AIX_ONLY(false) NOT_AIX(true); }
+  static size_t thread_count() { return _thread_count; }
+
+  // Snapshot support. Piggybacks thread stack data in the malloc slot, since NMT
+  // has always handled the thread stack slot specially.
+  static bool walk_simple_thread_stack_site(MallocSiteWalker* walker);
+};
+
+#endif // INCLUDE_NMT
+#endif // SHARE_SERVICES_THREADSTACKTRACKER_HPP
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp	Tue Mar 26 15:27:41 2019 -0400
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp	Tue Mar 26 15:50:34 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
 #include "services/memTracker.hpp"
+#include "services/threadStackTracker.hpp"
 #include "services/virtualMemoryTracker.hpp"
 
 size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
@@ -40,9 +41,12 @@
 }
 
 void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
-  // Snapshot current thread stacks
-  VirtualMemoryTracker::snapshot_thread_stacks();
-  as_snapshot()->copy_to(s);
+  // Only if thread stack is backed by virtual memory
+  if (ThreadStackTracker::track_as_vm()) {
+    // Snapshot current thread stacks
+    VirtualMemoryTracker::snapshot_thread_stacks();
+  }
+  as_snapshot()->copy_to(s);
 }
 
 SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;