8029255: G1: Reference processing should not enqueue references on the shared SATB queue
author pliden
date Fri, 10 Jan 2014 09:53:53 +0100
changeset 22496 383a5bdef99d
parent 22206 dfbfc5483c3e
child 22497 b2e1896e9616
8029255: G1: Reference processing should not enqueue references on the shared SATB queue
Reviewed-by: brutisso, tschatzl
hotspot/src/share/vm/memory/referenceProcessor.cpp
hotspot/src/share/vm/memory/referenceProcessor.hpp
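In broad strokes, the patch below replaces fully barriered reference-field updates in the reference processor (oop_store, set_next, set_discovered) with raw stores followed, where still needed, by an explicit post-barrier issued through oopDesc::bs()->write_ref_field(), and drops the cached _bs field in favour of that accessor. Per the synopsis, the goal is to keep reference processing from enqueuing on G1's shared SATB queue, which the pre-barrier half of a barriered store would otherwise trigger. The following is a minimal toy model of that before/after pattern; the types and printfs are hypothetical stand-ins, not HotSpot code.

  // Toy model (hypothetical types, not HotSpot code) of the pattern this
  // changeset applies: replace a barriered store, whose pre-barrier would
  // enqueue the old value on a SATB queue, with a raw store followed only
  // by an explicit post-barrier.
  #include <cstdio>

  struct ToyBarrierSet {
    // In G1 the pre-barrier records the value being overwritten (SATB).
    void write_ref_field_pre(void** field) {
      std::printf("pre-barrier: enqueue old value of %p on SATB queue\n", (void*)field);
    }
    // In G1 the post-barrier dirties the card covering the updated field.
    void write_ref_field(void** field, void* /*new_value*/) {
      std::printf("post-barrier: dirty card for %p\n", (void*)field);
    }
  };

  static ToyBarrierSet g_bs;  // stands in for oopDesc::bs()

  // Old shape: an oop_store-style helper that runs both barriers.
  void barriered_store(void** field, void* value) {
    g_bs.write_ref_field_pre(field);   // the SATB enqueue the fix avoids
    *field = value;
    g_bs.write_ref_field(field, value);
  }

  // New shape: raw store, then only the post-barrier where it is still required.
  void raw_store_with_post_barrier(void** field, void* value) {
    *field = value;                    // like set_next_raw / set_discovered_raw / oop_store_raw
    g_bs.write_ref_field(field, value);
  }

  int main() {
    void* slot  = nullptr;
    int   dummy = 0;
    barriered_store(&slot, &dummy);              // pattern before the change
    raw_store_with_post_barrier(&slot, &dummy);  // pattern after the change
    return 0;
  }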
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Tue Jan 07 16:15:35 2014 +0100
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Fri Jan 10 09:53:53 2014 +0100
@@ -100,7 +100,6 @@
   _enqueuing_is_done(false),
   _is_alive_non_header(is_alive_non_header),
   _discovered_list_needs_barrier(discovered_list_needs_barrier),
-  _bs(NULL),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {
@@ -126,10 +125,6 @@
     _discovered_refs[i].set_length(0);
   }
 
-  // If we do barriers, cache a copy of the barrier set.
-  if (discovered_list_needs_barrier) {
-    _bs = Universe::heap()->barrier_set();
-  }
   setup_policy(false /* default soft ref policy */);
 }
 
@@ -317,13 +312,9 @@
   // Enqueue references that are not made active again, and
   // clear the decks for the next collection (cycle).
   ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
-  // Do the oop-check on pending_list_addr missed in
-  // enqueue_discovered_reflist. We should probably
-  // do a raw oop_check so that future such idempotent
-  // oop_stores relying on the oop-check side-effect
-  // may be elided automatically and safely without
-  // affecting correctness.
-  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
+  // Do the post-barrier on pending_list_addr missed in
+  // enqueue_discovered_reflist.
+  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
 
   // Stop treating discovered references specially.
   ref->disable_discovery();
@@ -372,15 +363,17 @@
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "Reference not active; should not be discovered");
       // Self-loop next, so as to make Ref not active.
-      java_lang_ref_Reference::set_next(obj, obj);
+      // Post-barrier not needed when looping to self.
+      java_lang_ref_Reference::set_next_raw(obj, obj);
       if (next_d == obj) {  // obj is last
         // Swap refs_list into pendling_list_addr and
         // set obj's discovered to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
-        // Need oop_check on pending_list_addr above;
-        // see special oop-check code at the end of
+        // Need post-barrier on pending_list_addr above;
+        // see special post-barrier code at the end of
         // enqueue_discovered_reflists() further below.
-        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
+        java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
+        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
       }
     }
   } else { // Old behaviour
@@ -516,13 +509,11 @@
   // the reference object and will fail
   // CT verification.
   if (UseG1GC) {
-    BarrierSet* bs = oopDesc::bs();
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
-
     if (UseCompressedOops) {
-      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
+      oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);
     } else {
-      bs->write_ref_field_pre((oop*)next_addr, NULL);
+      oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
     }
     java_lang_ref_Reference::set_next_raw(_ref, NULL);
   } else {
@@ -790,10 +781,9 @@
 };
 
 void ReferenceProcessor::set_discovered(oop ref, oop value) {
+  java_lang_ref_Reference::set_discovered_raw(ref, value);
   if (_discovered_list_needs_barrier) {
-    java_lang_ref_Reference::set_discovered(ref, value);
-  } else {
-    java_lang_ref_Reference::set_discovered_raw(ref, value);
+    oopDesc::bs()->write_ref_field(ref, value);
   }
 }
 
@@ -1085,7 +1075,7 @@
   // so this will expand to nothing. As a result, we have manually
   // elided this out for G1, but left in the test for some future
   // collector that might have need for a pre-barrier here, e.g.:-
-  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+  // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
   assert(!_discovered_list_needs_barrier || UseG1GC,
          "Need to check non-G1 collector: "
          "may need a pre-write-barrier for CAS from NULL below");
@@ -1098,7 +1088,7 @@
     refs_list.set_head(obj);
     refs_list.inc_length(1);
     if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, next_discovered);
+      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
 
     if (TraceReferenceGC) {
@@ -1260,13 +1250,13 @@
 
     // As in the case further above, since we are over-writing a NULL
     // pre-value, we can safely elide the pre-barrier here for the case of G1.
-    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+    // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
     assert(discovered == NULL, "control point invariant");
     assert(!_discovered_list_needs_barrier || UseG1GC,
            "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
     oop_store_raw(discovered_addr, next_discovered);
     if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, next_discovered);
+      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
     list->set_head(obj);
     list->inc_length(1);
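Two of the elisions above rest on barrier properties called out in the comments: the self-loop of a Reference's next field uses set_next_raw with no post-barrier ("Post-barrier not needed when looping to self"), and the pre-barrier is skipped where a NULL pre-value is overwritten before the CAS-from-NULL cases. The sketch below is a toy model, not HotSpot code, of why both are safe under a SATB-style pre-barrier that ignores NULL old values and a card-marking post-barrier that filters stores landing in the writer's own region; the 1 MB region size and the exact filter conditions are illustrative assumptions.

  // Toy model (hypothetical, not HotSpot code) of the two barrier filters the
  // patch relies on: a SATB-style pre-barrier has nothing to record when the
  // old value is NULL, and a region-filtering post-barrier has nothing to mark
  // when a field ends up pointing back into its own region (e.g. a self-loop).
  #include <cstdint>
  #include <cstdio>

  static const uintptr_t kRegionSize = 1u << 20;  // pretend 1 MB heap regions

  // Pre-barrier: enqueue the about-to-be-overwritten value, unless it is NULL.
  void pre_barrier(void* old_value) {
    if (old_value != nullptr) {
      std::printf("SATB enqueue %p\n", old_value);
    }
  }

  // Post-barrier: dirty a card only when the new value points outside the
  // region holding the field being written.
  void post_barrier(void** field, void* new_value) {
    if (new_value != nullptr &&
        ((uintptr_t)field / kRegionSize) != ((uintptr_t)new_value / kRegionSize)) {
      std::printf("dirty card for %p\n", (void*)field);
    }
  }

  int main() {
    void* fields[2] = { nullptr, nullptr };  // stand-in object with two reference fields

    // Overwriting a NULL pre-value: the pre-barrier would record nothing,
    // which is why it can be elided for the CAS-from-NULL discovery cases.
    pre_barrier(fields[0]);

    // Self-loop, like set_next_raw(obj, obj): field and value share a region,
    // so the post-barrier would not dirty a card either.
    fields[0] = &fields[0];
    post_barrier(&fields[0], fields[0]);
    return 0;
  }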
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Tue Jan 07 16:15:35 2014 +0100
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Fri Jan 10 09:53:53 2014 +0100
@@ -235,7 +235,6 @@
   // discovery.)
   bool        _discovered_list_needs_barrier;
 
-  BarrierSet* _bs;                      // Cached copy of BarrierSet.
   bool        _enqueuing_is_done;       // true if all weak references enqueued
   bool        _processing_is_mt;        // true during phases when
                                         // reference processing is MT.
@@ -420,25 +419,6 @@
   void update_soft_ref_master_clock();
 
  public:
-  // constructor
-  ReferenceProcessor():
-    _span((HeapWord*)NULL, (HeapWord*)NULL),
-    _discovered_refs(NULL),
-    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
-    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
-    _discovering_refs(false),
-    _discovery_is_atomic(true),
-    _enqueuing_is_done(false),
-    _discovery_is_mt(false),
-    _discovered_list_needs_barrier(false),
-    _bs(NULL),
-    _is_alive_non_header(NULL),
-    _num_q(0),
-    _max_num_q(0),
-    _processing_is_mt(false),
-    _next_id(0)
-  { }
-
   // Default parameters give you a vanilla reference processor.
   ReferenceProcessor(MemRegion span,
                      bool mt_processing = false, uint mt_processing_degree = 1,