Merge
author       jcoomes
date         Fri, 25 Oct 2013 08:38:42 -0700 (2013-10-25)
changeset 21120 96752cc31d9a
parent 21111 c44e0cc2287b (current diff)
parent 21119 ca1c9dd0e277 (diff)
child 21121 13045b381bc9
child 21189 e851a0a007ce
child 21193 e372e03e782b
Merge
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Thu Oct 24 16:25:24 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Oct 25 08:38:42 2013 -0700
@@ -55,7 +55,7 @@
   // then _alloc_region is NULL and this object should not be used to
   // satisfy allocation requests (it was done this way to force the
   // correct use of init() and release()).
-  HeapRegion* _alloc_region;
+  HeapRegion* volatile _alloc_region;
 
   // It keeps track of the distinct number of regions that are used
   // for allocation in the active interval of this object, i.e.,
@@ -132,8 +132,9 @@
   static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);
 
   HeapRegion* get() const {
+    HeapRegion * hr = _alloc_region;
     // Make sure that the dummy region does not escape this class.
-    return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
+    return (hr == _dummy_region) ? NULL : hr;
   }
 
   uint count() { return _count; }
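
The g1AllocRegion.hpp change marks _alloc_region volatile and has get() read it once into a local before the dummy-region check. Below is a minimal standalone sketch of why that single read matters; it is not HotSpot code, and the Region/AllocRegionSketch names are made up for illustration. With two separate loads of the field, a concurrent swap between the comparison and the return could let the dummy region escape to callers.

    // Standalone sketch, not HotSpot code: the read-once pattern adopted by get().
    // Reading the volatile field exactly once guarantees the comparison and the
    // returned value see the same snapshot, even if another thread replaces the
    // field concurrently.
    #include <cstddef>

    struct Region { int id; };

    class AllocRegionSketch {
      Region* volatile _alloc_region;   // may be replaced by another thread
      Region*          _dummy_region;   // sentinel that must never be handed out
     public:
      explicit AllocRegionSketch(Region* dummy)
        : _alloc_region(dummy), _dummy_region(dummy) {}

      void set(Region* r) { _alloc_region = r; }

      Region* get() const {
        Region* hr = _alloc_region;                // single load of the volatile field
        return (hr == _dummy_region) ? NULL : hr;  // compare and return the same value
      }
    };
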
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Thu Oct 24 16:25:24 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Fri Oct 25 08:38:42 2013 -0700
@@ -187,19 +187,23 @@
   size_t code_root_elems() const { return _code_root_elems; }
 
   void print_rs_mem_info_on(outputStream * out, size_t total) {
-    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
+    out->print_cr("    "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
+        round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
   }
 
   void print_cards_occupied_info_on(outputStream * out, size_t total) {
-    out->print_cr("     %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
+    out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) entries by "SIZE_FORMAT" %s regions",
+        cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
   }
 
   void print_code_root_mem_info_on(outputStream * out, size_t total) {
-    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
+    out->print_cr("    "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
+        round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
   }
 
   void print_code_root_elems_info_on(outputStream * out, size_t total) {
-    out->print_cr("     %8d (%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
+    out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) elements by "SIZE_FORMAT" %s regions",
+        code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
   }
 };
 
@@ -327,14 +331,14 @@
   out->print_cr("\n Recent concurrent refinement statistics");
   out->print_cr("  Processed "SIZE_FORMAT" cards",
                 num_concurrent_refined_cards());
-  out->print_cr("  Of %d completed buffers:", num_processed_buf_total());
-  out->print_cr("     %8d (%5.1f%%) by concurrent RS threads.",
+  out->print_cr("  Of "SIZE_FORMAT" completed buffers:", num_processed_buf_total());
+  out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) by concurrent RS threads.",
                 num_processed_buf_total(),
                 percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
-  out->print_cr("     %8d (%5.1f%%) by mutator threads.",
+  out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) by mutator threads.",
                 num_processed_buf_mutator(),
                 percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
-  out->print_cr("  Did %d coarsenings.", num_coarsenings());
+  out->print_cr("  Did "SIZE_FORMAT" coarsenings.", num_coarsenings());
   out->print_cr("  Concurrent RS threads times (s)");
   out->print("     ");
   for (uint i = 0; i < _num_vtimes; i++) {
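
The g1RemSetSummary.cpp changes replace %d and %zd with the SIZE_FORMAT and SIZE_FORMAT_W(width) macros so size_t values print correctly on every supported platform (%d truncates on LP64, and not every compiler of the era accepted %zd). The sketch below illustrates the technique with hypothetical stand-in macros, not the real definitions from globalDefinitions*.hpp; the real macros pick a per-platform conversion specifier.

    // Simplified illustration with made-up macro names (MY_SIZE_FORMAT*).
    // Adjacent string literals concatenate, so the macro expands in place
    // inside the surrounding format string, as in the print_cr() calls above.
    #include <cstddef>
    #include <cstdio>

    #define MY_SIZE_FORMAT       "%zu"          // one portable spelling for size_t;
    #define MY_SIZE_FORMAT_W(w)  "%" #w "zu"    // the real macros predate universal %zu support

    int main() {
      size_t rs_mem_kb = 1024;
      size_t regions   = 42;
      printf("    " MY_SIZE_FORMAT_W(8) "K (%5.1f%%) by " MY_SIZE_FORMAT " %s regions\n",
             rs_mem_kb, 51.2, regions, "Old");
      return 0;
    }
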
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu Oct 24 16:25:24 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Fri Oct 25 08:38:42 2013 -0700
@@ -75,8 +75,7 @@
   ClassSmallChunk = 256,
   SmallChunk = 512,
   ClassMediumChunk = 4 * K,
-  MediumChunk = 8 * K,
-  HumongousChunkGranularity = 8
+  MediumChunk = 8 * K
 };
 
 static ChunkIndex next_chunk_index(ChunkIndex i) {
@@ -92,6 +91,7 @@
 
 // Manages the global free lists of chunks.
 class ChunkManager : public CHeapObj<mtInternal> {
+  friend class TestVirtualSpaceNodeTest;
 
   // Free list of chunks of different sizes.
   //   SpecializedChunk
@@ -257,6 +257,8 @@
   // VirtualSpace
   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 
+  // Committed but unused space in the virtual space
+  size_t free_words_in_vs() const;
  public:
 
   VirtualSpaceNode(size_t byte_size);
@@ -301,7 +303,6 @@
   // used and capacity in this single entry in the list
   size_t used_words_in_vs() const;
   size_t capacity_words_in_vs() const;
-  size_t free_words_in_vs() const;
 
   bool initialize();
 
@@ -319,6 +320,13 @@
   // in the node from any freelist.
   void purge(ChunkManager* chunk_manager);
 
+  // If an allocation doesn't fit in the current node a new node is created.
+  // Allocate chunks out of the remaining committed space in this node
+  // to avoid wasting that memory.
+  // This always adds up because all the chunk sizes are multiples of
+  // the smallest chunk size.
+  void retire(ChunkManager* chunk_manager);
+
 #ifdef ASSERT
   // Debug support
   void mangle();
@@ -461,6 +469,10 @@
   // and is typically followed by the allocation of a chunk.
   bool create_new_virtual_space(size_t vs_word_size);
 
+  // Chunk up the unused committed space in the current
+  // virtual space and add the chunks to the free list.
+  void retire_current_virtual_space();
+
  public:
   VirtualSpaceList(size_t word_size);
   VirtualSpaceList(ReservedSpace rs);
@@ -624,10 +636,12 @@
   bool is_class() { return _mdtype == Metaspace::ClassType; }
 
   // Accessors
-  size_t specialized_chunk_size() { return SpecializedChunk; }
-  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
-  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
-  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
+  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
+  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
+  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
+  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }
+
+  size_t smallest_chunk_size()  { return specialized_chunk_size(); }
 
   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
@@ -1056,6 +1070,35 @@
 #endif
 }
 
+void VirtualSpaceList::retire_current_virtual_space() {
+  assert_lock_strong(SpaceManager::expand_lock());
+
+  VirtualSpaceNode* vsn = current_virtual_space();
+
+  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
+                                  Metaspace::chunk_manager_metadata();
+
+  vsn->retire(cm);
+}
+
+void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
+  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
+    ChunkIndex index = (ChunkIndex)i;
+    size_t chunk_size = chunk_manager->free_chunks(index)->size();
+
+    while (free_words_in_vs() >= chunk_size) {
+      DEBUG_ONLY(verify_container_count();)
+      Metachunk* chunk = get_chunk_vs(chunk_size);
+      assert(chunk != NULL, "allocation should have been successful");
+
+      chunk_manager->return_chunks(index, chunk);
+      chunk_manager->inc_free_chunks_total(chunk_size);
+      DEBUG_ONLY(verify_container_count();)
+    }
+  }
+  assert(free_words_in_vs() == 0, "should be empty now");
+}
+
 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                    _is_class(false),
                                    _virtual_space_list(NULL),
@@ -1181,6 +1224,7 @@
   if (vs_expanded) {
     return true;
   }
+  retire_current_virtual_space();
 
   // Get another virtual space.
   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
@@ -1902,12 +1946,12 @@
     chunk_word_size = medium_chunk_size();
   }
 
-  // Might still need a humongous chunk.  Enforce an
-  // eight word granularity to facilitate reuse (some
-  // wastage but better chance of reuse).
+  // Might still need a humongous chunk.  Enforce
+  // humongous allocation sizes to be aligned up to
+  // the smallest chunk size.
   size_t if_humongous_sized_chunk =
     align_size_up(word_size + Metachunk::overhead(),
-                  HumongousChunkGranularity);
+                  smallest_chunk_size());
   chunk_word_size =
     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
 
@@ -2151,10 +2195,10 @@
     }
     assert(humongous_chunks->word_size() == (size_t)
            align_size_up(humongous_chunks->word_size(),
-                             HumongousChunkGranularity),
+                             smallest_chunk_size()),
            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
                    " granularity %d",
-                   humongous_chunks->word_size(), HumongousChunkGranularity));
+                   humongous_chunks->word_size(), smallest_chunk_size()));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
     humongous_chunks->container()->dec_container_count();
     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
@@ -3301,9 +3345,7 @@
   }
 
   if (result == NULL) {
-    report_metadata_oome(loader_data, word_size, mdtype, THREAD);
-    // Will not reach here.
-    return NULL;
+    report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
   }
 
   // Zero initialize.
@@ -3494,4 +3536,94 @@
   TestMetaspaceAuxTest::test();
 }
 
+class TestVirtualSpaceNodeTest {
+  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
+                                          size_t& num_small_chunks,
+                                          size_t& num_specialized_chunks) {
+    num_medium_chunks = words_left / MediumChunk;
+    words_left = words_left % MediumChunk;
+
+    num_small_chunks = words_left / SmallChunk;
+    words_left = words_left % SmallChunk;
+    // how many specialized chunks can we get?
+    num_specialized_chunks = words_left / SpecializedChunk;
+    assert(words_left % SpecializedChunk == 0, "should be nothing left");
+  }
+
+ public:
+  static void test() {
+    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    const size_t vsn_test_size_words = MediumChunk  * 4;
+    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
+
+    // The chunk sizes must be multiples of each other, or this will fail
+    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
+    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
+
+    { // No committed memory in VSN
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.retire(&cm);
+      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
+    }
+
+    { // All of VSN is committed, half is used by chunks
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
+      vsn.get_chunk_vs(MediumChunk);
+      vsn.get_chunk_vs(MediumChunk);
+      vsn.retire(&cm);
+      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
+      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
+    }
+
+    { // 4 pages of VSN is committed, some is used by chunks
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
+      assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
+      vsn.initialize();
+      vsn.expand_by(page_chunks, page_chunks);
+      vsn.get_chunk_vs(SmallChunk);
+      vsn.get_chunk_vs(SpecializedChunk);
+      vsn.retire(&cm);
+
+      // committed - used = words left to retire
+      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
+
+      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+      assert(num_medium_chunks == 0, "should not get any medium chunks");
+      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
+      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+    }
+
+    { // Half of VSN is committed, a humongous chunk is used
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
+      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
+      vsn.retire(&cm);
+
+      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
+      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+      assert(num_medium_chunks == 0, "should not get any medium chunks");
+      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
+      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+    }
+
+  }
+};
+
+void TestVirtualSpaceNode_test() {
+  TestVirtualSpaceNodeTest::test();
+}
+
 #endif
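
The metaspace.cpp changes retire the remaining committed space of the current VirtualSpaceNode into the chunk free lists instead of abandoning it, and align humongous chunks to the smallest chunk size rather than a fixed eight-word granularity. The key invariant, stated in the retire() comment and exercised by the new test, is that every chunk size is a multiple of the next smaller one, so greedily carving medium, then small, then specialized chunks leaves no remainder. A standalone sketch of that arithmetic follows; it is not HotSpot code, and the word sizes are illustrative stand-ins chosen, like the real constants, to be multiples of each other.

    // Standalone sketch of the carving strategy behind VirtualSpaceNode::retire()
    // and the test's chunk_up() helper.
    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t SpecializedChunkWords = 128;      // illustrative sizes only
      const size_t SmallChunkWords       = 512;
      const size_t MediumChunkWords      = 8 * 1024;

      // Mirrors the humongous-chunk test case: two medium chunks committed,
      // one chunk of MediumChunk + SpecializedChunk words already handed out.
      size_t words_left = 2 * MediumChunkWords - (MediumChunkWords + SpecializedChunkWords);

      const size_t num_medium = words_left / MediumChunkWords;       words_left %= MediumChunkWords;
      const size_t num_small  = words_left / SmallChunkWords;        words_left %= SmallChunkWords;
      const size_t num_spec   = words_left / SpecializedChunkWords;  words_left %= SpecializedChunkWords;

      // Because each size divides the next larger one, nothing is stranded.
      assert(words_left == 0);
      printf("medium=%zu small=%zu specialized=%zu\n", num_medium, num_small, num_spec);
      return 0;
    }
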
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Oct 24 16:25:24 2013 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Fri Oct 25 08:38:42 2013 -0700
@@ -5060,6 +5060,7 @@
 void TestVirtualSpace_test();
 void TestMetaspaceAux_test();
 void TestMetachunk_test();
+void TestVirtualSpaceNode_test();
 #if INCLUDE_ALL_GCS
 void TestG1BiasedArray_test();
 #endif
@@ -5072,6 +5073,7 @@
     run_unit_test(TestVirtualSpace_test());
     run_unit_test(TestMetaspaceAux_test());
     run_unit_test(TestMetachunk_test());
+    run_unit_test(TestVirtualSpaceNode_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Thu Oct 24 16:25:24 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Fri Oct 25 08:38:42 2013 -0700
@@ -225,18 +225,20 @@
   _method = NULL;
   _bci    = 0;
   _class_loader = NULL;
-#ifdef CHECK_UNHANDLED_OOPS
-  // This one is always allocated with new, but check it just in case.
-  Thread *thread = Thread::current();
-  if (thread->is_in_stack((address)&_method)) {
-    thread->allow_unhandled_oop((oop*)&_method);
-  }
-#endif // CHECK_UNHANDLED_OOPS
 }
 
 JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location) {
   _method        = m_method;
   _class_loader  = _method->method_holder()->class_loader_data()->class_loader();
+#ifdef CHECK_UNHANDLED_OOPS
+  // _class_loader can't be wrapped in a Handle, because JvmtiBreakpoints are
+  // eventually allocated on the heap.
+  //
+  // The code handling JvmtiBreakpoints allocated on the stack can't be
+  // interrupted by a GC until _class_loader is reachable by the GC via the
+  // oops_do method.
+  Thread::current()->allow_unhandled_oop(&_class_loader);
+#endif // CHECK_UNHANDLED_OOPS
   assert(_method != NULL, "_method != NULL");
   _bci           = (int) location;
   assert(_bci >= 0, "_bci >= 0");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/TestSystemGC.java	Fri Oct 25 08:38:42 2013 -0700
@@ -0,0 +1,46 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestSystemGC
+ * @key gc
+ * @summary Runs System.gc() with different flags.
+ * @run main/othervm TestSystemGC
+ * @run main/othervm -XX:+UseSerialGC TestSystemGC
+ * @run main/othervm -XX:+UseParNewGC TestSystemGC
+ * @run main/othervm -XX:+UseParallelGC TestSystemGC
+ * @run main/othervm -XX:+UseParallelGC -XX:-UseParallelOldGC TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -XX:-UseParNewGC TestSystemGC
+ * @run main/othervm -XX:+UseG1GC TestSystemGC
+ * @run main/othervm -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
+ * @run main/othervm -XX:+UseLargePages TestSystemGC
+ * @run main/othervm -XX:+UseLargePages -XX:+UseLargePagesInMetaspace TestSystemGC
+ */
+
+public class TestSystemGC {
+  public static void main(String args[]) throws Exception {
+    System.gc();
+  }
+}