8068451: Kitchensink fails with assert(_size >= sz) failed: Negative size
author coleenp
date Tue, 10 Feb 2015 15:10:47 -0500
changeset 28953 75f0bfa6ea14
parent 28952 7fe008f8f88c
child 28955 c90fb9fd47bd
8068451: Kitchensink fails with assert(_size >= sz) failed: Negative size
Summary: Need ThreadCritical lock around chunk pool cleaning and copying for snapshot
Reviewed-by: lfoltan, gtriantafill, hseigel
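
The race the summary describes: ChunkPool::clean() used to unlink chunks inside ThreadCritical but call os::free() on them after releasing it, so MallocMemorySnapshot::copy_to() could copy the NMT counters while frees were still in flight, and make_adjustment() would then subtract a stale chunk total and trip assert(_size >= sz). Below is a minimal standalone sketch of the locking pattern the patch adopts, with std::mutex standing in for HotSpot's ThreadCritical and all type and member names illustrative rather than HotSpot APIs:

    #include <cstddef>
    #include <mutex>

    // Stand-in for HotSpot's ThreadCritical: one global critical section.
    static std::mutex tc_mutex;

    struct Chunk {
      Chunk* next;
      size_t size;
    };

    struct ChunkPoolSketch {
      Chunk*  first        = nullptr;
      size_t  num_chunks   = 0;
      size_t  pooled_bytes = 0;   // what NMT's mtChunk counter would track

      // Keep at most n chunks; free the rest. Both the list surgery and
      // the frees happen inside the critical section, so a concurrent
      // snapshot never sees freed chunks that are still being counted
      // (the pre-fix race behind "Negative size").
      void clean(size_t n) {
        std::lock_guard<std::mutex> tc(tc_mutex);
        while (num_chunks > n) {
          Chunk* cur = first;
          first = cur->next;
          pooled_bytes -= cur->size;
          num_chunks--;
          delete cur;
        }
      }
    };

    struct SnapshotSketch {
      size_t chunk_bytes = 0;

      // Copy under the same critical section, so a later adjustment
      // against these bytes cannot underflow.
      void copy_from(const ChunkPoolSketch& pool) {
        std::lock_guard<std::mutex> tc(tc_mutex);
        chunk_bytes = pool.pooled_bytes;
      }
    };

    int main() {
      ChunkPoolSketch pool;
      // Stock the pool with three 64-byte chunks.
      for (int i = 0; i < 3; i++) {
        pool.first = new Chunk{pool.first, 64};
        pool.num_chunks++;
        pool.pooled_bytes += 64;
      }
      SnapshotSketch snap;
      snap.copy_from(pool);   // consistent: sees 192 bytes
      pool.clean(1);          // frees two chunks entirely under the lock
      return 0;
    }

The changeset below is the same shape: ChunkPool::clean() now frees the surplus chunks while still inside ThreadCritical, and MallocMemorySnapshot::copy_to() takes the same lock, so the counters the snapshot adjusts cannot move underneath it.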
hotspot/src/share/vm/memory/allocation.cpp
hotspot/src/share/vm/services/mallocSiteTable.cpp
hotspot/src/share/vm/services/mallocTracker.cpp
hotspot/src/share/vm/services/mallocTracker.hpp
hotspot/src/share/vm/services/nmtDCmd.cpp
--- a/hotspot/src/share/vm/memory/allocation.cpp	Tue Feb 10 16:53:00 2015 +0000
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Tue Feb 10 15:10:47 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -275,31 +275,30 @@
     Chunk* cur = NULL;
     Chunk* next;
     {
-    // if we have more than n chunks, free all of them
-    ThreadCritical tc;
-    if (_num_chunks > n) {
-      // free chunks at end of queue, for better locality
+      // if we have more than n chunks, free all of them
+      ThreadCritical tc;
+      if (_num_chunks > n) {
+        // free chunks at end of queue, for better locality
         cur = _first;
-      for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
+        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
 
-      if (cur != NULL) {
+        if (cur != NULL) {
           next = cur->next();
-        cur->set_next(NULL);
-        cur = next;
+          cur->set_next(NULL);
+          cur = next;
 
-          _num_chunks = n;
+          // Free all remaining chunks while in ThreadCritical lock
+          // so NMT adjustment is stable.
+          while(cur != NULL) {
+            next = cur->next();
+            os::free(cur);
+            _num_chunks--;
+            cur = next;
+          }
         }
       }
     }
-
-    // Free all remaining chunks, outside of ThreadCritical
-    // to avoid deadlock with NMT
-        while(cur != NULL) {
-          next = cur->next();
-      os::free(cur);
-          cur = next;
-        }
-      }
+  }
 
   // Accessors to preallocated pool's
   static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
@@ -384,7 +383,9 @@
    case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
    case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
    case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
-   default:                 os::free(c);
+   default:
+     ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
+     os::free(c);
   }
 }
 
--- a/hotspot/src/share/vm/services/mallocSiteTable.cpp	Tue Feb 10 16:53:00 2015 +0000
+++ b/hotspot/src/share/vm/services/mallocSiteTable.cpp	Tue Feb 10 15:10:47 2015 -0500
@@ -136,7 +136,7 @@
 MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
   size_t* pos_idx) {
   int index = hash_to_index(key.hash());
-  assert(index >= 0, "Negative index");
+  assert(index >= 0, err_msg("Negative index %d", index));
   *bucket_idx = (size_t)index;
   *pos_idx = 0;
 
--- a/hotspot/src/share/vm/services/mallocTracker.cpp	Tue Feb 10 16:53:00 2015 +0000
+++ b/hotspot/src/share/vm/services/mallocTracker.cpp	Tue Feb 10 15:10:47 2015 -0500
@@ -52,7 +52,7 @@
 }
 
 // Make adjustment by subtracting chunks used by arenas
-// from total chunks to get total free chunck size
+// from total chunks to get total free chunk size
 void MallocMemorySnapshot::make_adjustment() {
   size_t arena_size = total_arena();
   int chunk_idx = NMTUtil::flag_to_index(mtChunk);
--- a/hotspot/src/share/vm/services/mallocTracker.hpp	Tue Feb 10 16:53:00 2015 +0000
+++ b/hotspot/src/share/vm/services/mallocTracker.hpp	Tue Feb 10 15:10:47 2015 -0500
@@ -29,6 +29,7 @@
 
 #include "memory/allocation.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/threadCritical.hpp"
 #include "services/nmtCommon.hpp"
 #include "utilities/nativeCallStack.hpp"
 
@@ -164,6 +165,10 @@
   }
 
   void copy_to(MallocMemorySnapshot* s) {
+    // Need to make sure that mtChunks don't get deallocated while the
+    // copy is going on, because their size is adjusted using this
+    // buffer in make_adjustment().
+    ThreadCritical tc;
     s->_tracking_header = _tracking_header;
     for (int index = 0; index < mt_number_of_types; index ++) {
       s->_malloc[index] = _malloc[index];
--- a/hotspot/src/share/vm/services/nmtDCmd.cpp	Tue Feb 10 16:53:00 2015 +0000
+++ b/hotspot/src/share/vm/services/nmtDCmd.cpp	Tue Feb 10 15:10:47 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,8 +137,8 @@
     }
   } else if (_detail_diff.value()) {
     if (!check_detail_tracking_level(output())) {
-    return;
-  }
+      return;
+    }
     MemBaseline& baseline = MemTracker::get_baseline();
     if (baseline.baseline_type() == MemBaseline::Detail_baselined) {
       report_diff(false, scale_unit);