Merge
author jcoomes
Fri, 08 Jun 2012 09:49:49 -0700
changeset 12936 d823bd26641a
parent 12932 16599c579430 (current diff)
parent 12935 2ae81fce3911 (diff)
child 12938 fb5532132983
child 12942 b69b4fb3954e
child 12951 fd152ca7ec9a
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Jun 04 10:22:37 2012 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Jun 08 09:49:49 2012 -0700
@@ -293,7 +293,7 @@
     // Java thread is waiting for a full GC to happen (e.g., it
     // called System.gc() with +ExplicitGCInvokesConcurrent).
     _sts.join();
-    g1h->increment_full_collections_completed(true /* concurrent */);
+    g1h->increment_old_marking_cycles_completed(true /* concurrent */);
     _sts.leave();
   }
   assert(_should_terminate, "just checking");
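
The increment above is also what wakes any Java thread blocked in System.gc() under -XX:+ExplicitGCInvokesConcurrent: increment_old_marking_cycles_completed() bumps the completed counter while holding FullGCCount_lock and then notifies the waiters (see the g1CollectedHeap.hpp comment further down). A minimal sketch of that notify side, modeled with standard C++ primitives rather than HotSpot's MonitorLockerEx; the names are illustrative stand-ins, not HotSpot APIs:

    #include <condition_variable>
    #include <mutex>

    // Stand-ins for FullGCCount_lock and _old_marking_cycles_completed.
    std::mutex              full_gc_count_lock;
    std::condition_variable full_gc_count_cv;
    unsigned int            old_marking_cycles_completed_model = 0;

    // What the concurrent mark thread effectively does when a cycle ends.
    void signal_cycle_completed() {
      std::lock_guard<std::mutex> guard(full_gc_count_lock);
      ++old_marking_cycles_completed_model;
      full_gc_count_cv.notify_all();  // wake every thread waiting for a cycle
    }

The matching wait loop appears in the vm_operations_g1.cpp hunks below.
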
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jun 04 10:22:37 2012 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jun 08 09:49:49 2012 -0700
@@ -1299,6 +1299,7 @@
 
     gc_prologue(true);
     increment_total_collections(true /* full gc */);
+    increment_old_marking_cycles_started();
 
     size_t g1h_prev_used = used();
     assert(used() == recalculate_used(), "Should be equal");
@@ -1492,22 +1493,28 @@
     JavaThread::dirty_card_queue_set().abandon_logs();
     assert(!G1DeferredRSUpdate
            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
-  }
-
-  _young_list->reset_sampled_info();
-  // At this point there should be no regions in the
-  // entire heap tagged as young.
-  assert( check_young_list_empty(true /* check_heap */),
-    "young list should be empty at this point");
-
-  // Update the number of full collections that have been completed.
-  increment_full_collections_completed(false /* concurrent */);
-
-  _hrs.verify_optional();
-  verify_region_sets_optional();
-
-  print_heap_after_gc();
-  g1mm()->update_sizes();
+
+    _young_list->reset_sampled_info();
+    // At this point there should be no regions in the
+    // entire heap tagged as young.
+    assert( check_young_list_empty(true /* check_heap */),
+      "young list should be empty at this point");
+
+    // Update the number of old marking cycles that have been completed.
+    increment_old_marking_cycles_completed(false /* concurrent */);
+
+    _hrs.verify_optional();
+    verify_region_sets_optional();
+
+    print_heap_after_gc();
+
+    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+    // before any GC notifications are raised.
+    g1mm()->update_sizes();
+  }
+
   post_full_gc_dump();
 
   return true;
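
The new comment above encodes a destructor-ordering rule: TraceMemoryManagerStats evidently raises the GC notification from its destructor, so G1MonitoringSupport::update_sizes() must run while that object is still in scope, or the notification would observe stale pool sizes. A hedged sketch of the shape, with toy types standing in for the real ones:

    void raise_gc_notification() { /* deliver the GC notification */ }
    void update_pool_sizes()     { /* refresh the G1 memory pool figures */ }

    // Toy stand-in for TraceMemoryManagerStats: notifies on destruction.
    struct TraceStats {
      ~TraceStats() { raise_gc_notification(); }
    };

    void full_gc() {
      {
        TraceStats stats;     // constructed near the top of the GC scope
        // ... collection work ...
        update_pool_sizes();  // must run here, before ~TraceStats()
      }                       // ~TraceStats(): notification sees fresh sizes
    }

This is why the hunk re-indents the epilogue into the inner scope instead of leaving it after the closing brace.
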
@@ -1888,7 +1895,8 @@
   _retained_old_gc_alloc_region(NULL),
   _expand_heap_after_alloc_failure(true),
   _surviving_young_words(NULL),
-  _full_collections_completed(0),
+  _old_marking_cycles_started(0),
+  _old_marking_cycles_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL),
@@ -2360,7 +2368,16 @@
 }
 #endif // !PRODUCT
 
-void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
+void G1CollectedHeap::increment_old_marking_cycles_started() {
+  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
+    _old_marking_cycles_started == _old_marking_cycles_completed + 1,
+    err_msg("Wrong marking cycle count (started: %d, completed: %d)",
+    _old_marking_cycles_started, _old_marking_cycles_completed));
+
+  _old_marking_cycles_started++;
+}
+
+void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 
   // We assume that if concurrent == true, then the caller is a
@@ -2368,11 +2385,6 @@
   // Set. If there's ever a cheap way to check this, we should add an
   // assert here.
 
-  // We have already incremented _total_full_collections at the start
-  // of the GC, so total_full_collections() represents how many full
-  // collections have been started.
-  unsigned int full_collections_started = total_full_collections();
-
   // Given that this method is called at the end of a Full GC or of a
   // concurrent cycle, and those can be nested (i.e., a Full GC can
   // interrupt a concurrent cycle), the number of full collections
@@ -2382,21 +2394,21 @@
 
   // This is the case for the inner caller, i.e. a Full GC.
   assert(concurrent ||
-         (full_collections_started == _full_collections_completed + 1) ||
-         (full_collections_started == _full_collections_completed + 2),
-         err_msg("for inner caller (Full GC): full_collections_started = %u "
-                 "is inconsistent with _full_collections_completed = %u",
-                 full_collections_started, _full_collections_completed));
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
+         err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
+                 "is inconsistent with _old_marking_cycles_completed = %u",
+                 _old_marking_cycles_started, _old_marking_cycles_completed));
 
   // This is the case for the outer caller, i.e. the concurrent cycle.
   assert(!concurrent ||
-         (full_collections_started == _full_collections_completed + 1),
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
          err_msg("for outer caller (concurrent cycle): "
-                 "full_collections_started = %u "
-                 "is inconsistent with _full_collections_completed = %u",
-                 full_collections_started, _full_collections_completed));
-
-  _full_collections_completed += 1;
+                 "_old_marking_cycles_started = %u "
+                 "is inconsistent with _old_marking_cycles_completed = %u",
+                 _old_marking_cycles_started, _old_marking_cycles_completed));
+
+  _old_marking_cycles_completed += 1;
 
   // We need to clear the "in_progress" flag in the CM thread before
  // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
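
The asserts above tolerate a gap of two between the counters for the inner caller because a Full GC can start while a concurrent cycle is still in flight. Replaying that nesting in a compilable toy model (plain C++, not HotSpot code) makes the bounds concrete:

    #include <cassert>

    static unsigned int started = 0, completed = 0;  // model counters

    // cf. increment_old_marking_cycles_started()
    void start_cycle() {
      assert(started == completed || started == completed + 1);
      ++started;
    }

    // cf. increment_old_marking_cycles_completed(concurrent)
    void complete_cycle(bool concurrent) {
      assert(concurrent  || started == completed + 1 || started == completed + 2);
      assert(!concurrent || started == completed + 1);
      ++completed;
    }

    int main() {
      start_cycle();          // concurrent cycle begins:      started 1, completed 0
      start_cycle();          // Full GC interrupts the cycle: started 2, completed 0
      complete_cycle(false);  // inner caller (Full GC) sees the gap of 2
      complete_cycle(true);   // outer caller (the cycle) ends with the usual gap of 1
      return 0;
    }
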
@@ -2432,7 +2444,7 @@
   assert_heap_not_locked();
 
   unsigned int gc_count_before;
-  unsigned int full_gc_count_before;
+  unsigned int old_marking_count_before;
   bool retry_gc;
 
   do {
@@ -2443,7 +2455,7 @@
 
       // Read the GC count while holding the Heap_lock
       gc_count_before = total_collections();
-      full_gc_count_before = total_full_collections();
+      old_marking_count_before = _old_marking_cycles_started;
     }
 
     if (should_do_concurrent_full_gc(cause)) {
@@ -2458,7 +2470,7 @@
 
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
-        if (full_gc_count_before == total_full_collections()) {
+        if (old_marking_count_before == _old_marking_cycles_started) {
           retry_gc = op.should_retry_gc();
         } else {
           // A Full GC happened while we were trying to schedule the
@@ -2486,7 +2498,7 @@
         VMThread::execute(&op);
       } else {
         // Schedule a Full GC.
-        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+        VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
         VMThread::execute(&op);
       }
     }
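
Taken together, the hunks above implement a snapshot-and-compare retry: read _old_marking_cycles_started under the Heap_lock, attempt to schedule the pause, and on failure retry only if the counter is unchanged; if it moved, another thread started a cycle (or a Full GC intervened) in the meantime. A simplified distillation of that control flow (the real loop also consults op.should_retry_gc(); the helper names here are invented for illustration):

    // Hypothetical stand-ins for the Heap_lock read and the
    // VM_G1IncCollectionPause / VMThread::execute() machinery.
    static unsigned int g_started = 0;
    unsigned int read_started_counter_locked() { return g_started; }
    bool schedule_initial_mark_pause()         { return true; /* stub: pause succeeded */ }

    void collect_concurrently() {
      bool retry_gc;
      do {
        retry_gc = false;
        unsigned int before = read_started_counter_locked();  // snapshot
        if (!schedule_initial_mark_pause()) {
          // Retry only if nobody advanced the counter while we were trying.
          retry_gc = (read_started_counter_locked() == before);
        }
      } while (retry_gc);
    }
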
@@ -3613,7 +3625,7 @@
     if (g1_policy()->during_initial_mark_pause()) {
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
-      increment_total_full_collections();
+      increment_old_marking_cycles_started();
     }
    // if the log level "finer" is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
@@ -3930,25 +3942,30 @@
 
       gc_epilogue(false);
     }
-  }
-
-  // The closing of the inner scope, immediately above, will complete
-  // logging at the "fine" level. The record_collection_pause_end() call
-  // above will complete logging at the "finer" level.
-  //
-  // It is not yet to safe, however, to tell the concurrent mark to
-  // start as we have some optional output below. We don't want the
-  // output from the concurrent mark thread interfering with this
-  // logging output either.
-
-  _hrs.verify_optional();
-  verify_region_sets_optional();
-
-  TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
-  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
-
-  print_heap_after_gc();
-  g1mm()->update_sizes();
+
+    // The closing of the inner scope, immediately above, will complete
+    // logging at the "fine" level. The record_collection_pause_end() call
+    // above will complete logging at the "finer" level.
+    //
+    // It is not yet safe, however, to tell the concurrent mark to
+    // start as we have some optional output below. We don't want the
+    // output from the concurrent mark thread interfering with this
+    // logging output either.
+
+    _hrs.verify_optional();
+    verify_region_sets_optional();
+
+    TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
+    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
+
+    print_heap_after_gc();
+
+    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+    // before any GC notifications are raised.
+    g1mm()->update_sizes();
+  }
 
   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Jun 04 10:22:37 2012 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jun 08 09:49:49 2012 -0700
@@ -359,10 +359,13 @@
   // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
-  // Keeps track of how many "full collections" (i.e., Full GCs or
-  // concurrent cycles) we have completed. The number of them we have
-  // started is maintained in _total_full_collections in CollectedHeap.
-  volatile unsigned int _full_collections_completed;
+  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
+  // concurrent cycles) we have started.
+  volatile unsigned int _old_marking_cycles_started;
+
+  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
+  // concurrent cycles) we have completed.
+  volatile unsigned int _old_marking_cycles_completed;
 
   // This is a non-product method that is helpful for testing. It is
   // called at the end of a GC and artificially expands the heap by
@@ -673,8 +676,12 @@
            (size_t) _in_cset_fast_test_length * sizeof(bool));
   }
 
+  // This is called at the start of either a concurrent cycle or a Full
+  // GC to update the number of old marking cycles started.
+  void increment_old_marking_cycles_started();
+
   // This is called at the end of either a concurrent cycle or a Full
-  // GC to update the number of full collections completed. Those two
+  // GC to update the number of old marking cycles completed. Those two
   // can happen in a nested fashion, i.e., we start a concurrent
   // cycle, a Full GC happens half-way through it which ends first,
   // and then the cycle notices that a Full GC happened and ends
@@ -683,14 +690,14 @@
   // false, the caller is the inner caller in the nesting (i.e., the
   // Full GC). If concurrent is true, the caller is the outer caller
   // in this nesting (i.e., the concurrent cycle). Further nesting is
-  // not currently supported. The end of the this call also notifies
+  // not currently supported. The end of this call also notifies
   // the FullGCCount_lock in case a Java thread is waiting for a full
   // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
-  void increment_full_collections_completed(bool concurrent);
+  void increment_old_marking_cycles_completed(bool concurrent);
 
-  unsigned int full_collections_completed() {
-    return _full_collections_completed;
+  unsigned int old_marking_cycles_completed() {
+    return _old_marking_cycles_completed;
   }
 
   G1HRPrinter* hr_printer() { return &_hr_printer; }
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Jun 04 10:22:37 2012 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Jun 08 09:49:49 2012 -0700
@@ -64,7 +64,7 @@
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
     _should_retry_gc(false),
-    _full_collections_completed_before(0) {
+    _old_marking_cycles_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
@@ -112,11 +112,11 @@
 
   GCCauseSetter x(g1h, _gc_cause);
   if (_should_initiate_conc_mark) {
-    // It's safer to read full_collections_completed() here, given
+    // It's safer to read old_marking_cycles_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
     // only need it if we're initiating a marking cycle, no point in
     // setting it earlier.
-    _full_collections_completed_before = g1h->full_collections_completed();
+    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();
 
     // At this point we are supposed to start a concurrent cycle. We
     // will do so if one is not already in progress.
@@ -181,17 +181,17 @@
 
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-    // In the doit() method we saved g1h->full_collections_completed()
-    // in the _full_collections_completed_before field. We have to
-    // wait until we observe that g1h->full_collections_completed()
+    // In the doit() method we saved g1h->old_marking_cycles_completed()
+    // in the _old_marking_cycles_completed_before field. We have to
+    // wait until we observe that g1h->old_marking_cycles_completed()
     // has increased by at least one. This can happen if a) we started
     // a cycle and it completes, b) a cycle already in progress
     // completes, or c) a Full GC happens.
 
     // If the condition has already been reached, there's no point in
     // actually taking the lock and doing the wait.
-    if (g1h->full_collections_completed() <=
-                                          _full_collections_completed_before) {
+    if (g1h->old_marking_cycles_completed() <=
+                                          _old_marking_cycles_completed_before) {
       // The following is largely copied from CMS
 
       Thread* thr = Thread::current();
@@ -200,8 +200,8 @@
       ThreadToNativeFromVM native(jt);
 
       MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-      while (g1h->full_collections_completed() <=
-                                          _full_collections_completed_before) {
+      while (g1h->old_marking_cycles_completed() <=
+                                          _old_marking_cycles_completed_before) {
         FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
       }
     }
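
This is the consumer half of the handshake sketched next to the concurrentMarkThread.cpp hunk at the top of the changeset: block on FullGCCount_lock until the completed counter exceeds the snapshot taken in doit(). The same idiom in standard C++ terms, reusing the stand-ins declared in that earlier sketch:

    #include <condition_variable>
    #include <mutex>

    extern std::mutex              full_gc_count_lock;
    extern std::condition_variable full_gc_count_cv;
    extern unsigned int            old_marking_cycles_completed_model;

    // cf. doit_epilogue(): wait until at least one more cycle has completed.
    void wait_for_cycle_completion(unsigned int completed_before) {
      std::unique_lock<std::mutex> lock(full_gc_count_lock);
      // The predicate re-check mirrors the while loop above and guards
      // against spurious wakeups.
      full_gc_count_cv.wait(lock, [&] {
        return old_marking_cycles_completed_model > completed_before;
      });
    }
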
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Mon Jun 04 10:22:37 2012 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Jun 08 09:49:49 2012 -0700
@@ -80,7 +80,7 @@
   bool         _should_initiate_conc_mark;
   bool         _should_retry_gc;
   double       _target_pause_time_ms;
-  unsigned int _full_collections_completed_before;
+  unsigned int _old_marking_cycles_completed_before;
 public:
   VM_G1IncCollectionPause(unsigned int   gc_count_before,
                           size_t         word_size,
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Mon Jun 04 10:22:37 2012 -0400
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Jun 08 09:49:49 2012 -0700
@@ -230,7 +230,7 @@
   link_tail(chunk);
 
   assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
-  FreeList<Chunk>::increment_count();
+  increment_count();
   debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -258,7 +258,7 @@
   }
   head()->link_after(chunk);
   assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
-  FreeList<Chunk>::increment_count();
+  increment_count();
   debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -909,6 +909,7 @@
 
 template <class Chunk>
 class AscendTreeCensusClosure : public TreeCensusClosure<Chunk> {
+  using TreeCensusClosure<Chunk>::do_list;
  public:
   void do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
@@ -921,6 +922,7 @@
 
 template <class Chunk>
 class DescendTreeCensusClosure : public TreeCensusClosure<Chunk> {
+  using TreeCensusClosure<Chunk>::do_list;
  public:
   void do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
@@ -987,6 +989,7 @@
 
 template <class Chunk>
 class DescendTreeSearchClosure : public TreeSearchClosure<Chunk> {
+  using TreeSearchClosure<Chunk>::do_list;
  public:
   bool do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp	Mon Jun 04 10:22:37 2012 -0400
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp	Fri Jun 08 09:49:49 2012 -0700
@@ -60,13 +60,18 @@
   TreeList<Chunk>* left()   const { return _left;   }
   TreeList<Chunk>* right()  const { return _right;  }
 
-  // Wrapper on call to base class, to get the template to compile.
-  Chunk* head() const { return FreeList<Chunk>::head(); }
-  Chunk* tail() const { return FreeList<Chunk>::tail(); }
-  void set_head(Chunk* head) { FreeList<Chunk>::set_head(head); }
-  void set_tail(Chunk* tail) { FreeList<Chunk>::set_tail(tail); }
+  // Explicitly import these names into our namespace to fix name lookup with templates
+  using FreeList<Chunk>::head;
+  using FreeList<Chunk>::set_head;
 
-  size_t size() const { return FreeList<Chunk>::size(); }
+  using FreeList<Chunk>::tail;
+  using FreeList<Chunk>::set_tail;
+  using FreeList<Chunk>::link_tail;
+
+  using FreeList<Chunk>::increment_count;
+  NOT_PRODUCT(using FreeList<Chunk>::increment_returned_bytes_by;)
+  using FreeList<Chunk>::verify_chunk_in_free_list;
+  using FreeList<Chunk>::size;
 
   // Accessors for links in tree.
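
The switch from forwarding wrappers to using-declarations here (and the matching additions in binaryTreeDictionary.cpp above) addresses C++ two-phase name lookup: inside a class template, unqualified lookup does not search a dependent base class such as FreeList<Chunk>, so a bare call like increment_count() is rejected by conforming compilers. Writing FreeList<Chunk>::increment_count() works, but for virtual functions it also pins the call to that one implementation; importing the names is the cleaner fix. A minimal self-contained illustration:

    template <class T>
    struct Base {                      // plays the role of FreeList<Chunk>
      void increment_count() {}
    };

    template <class T>
    struct Derived : public Base<T> {  // plays the role of TreeList<Chunk>
      using Base<T>::increment_count;  // import the dependent-base name

      void add_chunk() {
        // Without the using-declaration (or this->increment_count()),
        // phase-one lookup cannot see Base<T>::increment_count and the
        // unqualified call is ill-formed.
        increment_count();
      }
    };

    int main() {
      Derived<int> d;
      d.add_chunk();
      return 0;
    }
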