7085906: Replace the permgen allocated sentinelRef with a self-looped end
author stefank
date Thu, 01 Sep 2011 16:18:17 +0200
changeset 10524 6594ca81279a
parent 10522 23830453e083
child 10525 5e44fe6a4262
7085906: Replace the permgen allocated sentinelRef with a self-looped end
Summary: Remove the sentinelRef and let the last Reference in a discovered chain point back to itself.
Reviewed-by: ysr, jmasa
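
The change replaces a sentinel object allocated in permgen with a structural end marker: an empty discovered list now has a NULL head, and the last Reference in a chain has its discovered field pointing back at itself. The following standalone sketch illustrates that invariant and the (obj, next) walk used throughout the patch; Ref, push and length are hypothetical stand-ins for HotSpot's oop and java_lang_ref_Reference::discovered, not the real API.

#include <cassert>
#include <cstdio>

struct Ref {
  Ref* discovered = nullptr;  // link to the next Reference in a discovered chain
};

// Prepend obj to a self-looped list. An empty list has head == nullptr, so
// the first element becomes the tail and must point back at itself.
Ref* push(Ref* head, Ref* obj) {
  obj->discovered = (head != nullptr) ? head : obj;
  return obj;                 // obj is the new head
}

// Walk the chain with the (obj, next) idiom used throughout the patch:
// the tail's self-loop makes obj == next exactly once, at the end.
int length(Ref* head) {
  int n = 0;
  Ref* obj = nullptr;
  Ref* next = head;
  while (obj != next) {
    obj = next;
    next = obj->discovered;
    ++n;
  }
  return n;
}

int main() {
  Ref a, b;
  Ref* head = nullptr;        // empty: NULL head, no sentinel needed
  head = push(head, &a);      // a.discovered == &a (self-loop)
  head = push(head, &b);      // b -> a -> a
  assert(a.discovered == &a && length(head) == 2);
  printf("chain length: %d\n", length(head));
  return 0;
}

With this representation the list head is an ordinary, possibly NULL root, which is why the patch can delete ReferenceProcessor::oops_do, ReferenceProcessor::verify and the sentinel bookkeeping in the hunks below.
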
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
hotspot/src/share/vm/memory/genCollectedHeap.cpp
hotspot/src/share/vm/memory/referenceProcessor.cpp
hotspot/src/share/vm/memory/referenceProcessor.hpp
hotspot/src/share/vm/memory/sharedHeap.cpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -4613,7 +4613,6 @@
     // keep entries (which are added by the marking threads) on them
     // live until they can be processed at the end of marking.
     ref_processor()->weak_oops_do(&buf_scan_non_heap_roots);
-    ref_processor()->oops_do(&buf_scan_non_heap_roots);
   }
 
   // Finish up any enqueued closure apps (attributed as object copy time).
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -909,10 +909,6 @@
     }
     young_gen()->verify(allow_dirty);
   }
-  if (!silent) {
-    gclog_or_tty->print("ref_proc ");
-  }
-  ReferenceProcessor::verify();
 }
 
 void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -80,10 +80,6 @@
       Universe::oops_do(&mark_and_push_closure);
       break;
 
-    case reference_processing:
-      ReferenceProcessor::oops_do(&mark_and_push_closure);
-      break;
-
     case jni_handles:
       JNIHandles::oops_do(&mark_and_push_closure);
       break;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Thu Sep 01 16:18:17 2011 +0200
@@ -98,8 +98,7 @@
     management            = 6,
     jvmti                 = 7,
     system_dictionary     = 8,
-    reference_processing  = 9,
-    code_cache            = 10
+    code_cache            = 9
   };
  private:
   RootType _root_type;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -516,7 +516,6 @@
   {
     ParallelScavengeHeap::ParStrongRootsScope psrs;
     Universe::oops_do(mark_and_push_closure());
-    ReferenceProcessor::oops_do(mark_and_push_closure());
     JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
     CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
     Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
@@ -623,7 +622,6 @@
 
   // General strong roots.
   Universe::oops_do(adjust_root_pointer_closure());
-  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
   Threads::oops_do(adjust_root_pointer_closure(), NULL);
   ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -2445,7 +2445,6 @@
 
   // General strong roots.
   Universe::oops_do(adjust_root_pointer_closure());
-  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
   Threads::oops_do(adjust_root_pointer_closure(), NULL);
   ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -55,7 +55,6 @@
   switch (_root_type) {
     case universe:
       Universe::oops_do(&roots_closure);
-      ReferenceProcessor::oops_do(&roots_closure);
       break;
 
     case jni_handles:
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -1269,10 +1269,6 @@
     gclog_or_tty->print("remset ");
   }
   rem_set()->verify();
-  if (!silent) {
-     gclog_or_tty->print("ref_proc ");
-  }
-  ReferenceProcessor::verify();
 }
 
 void GenCollectedHeap::print() const { print_on(tty); }
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -35,7 +35,6 @@
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
-oop              ReferenceProcessor::_sentinelRef = NULL;
 const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
 
 // List of discovered references.
@@ -43,7 +42,7 @@
 public:
   DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
   oop head() const     {
-     return UseCompressedOops ?  oopDesc::decode_heap_oop_not_null(_compressed_head) :
+     return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
                                 _oop_head;
   }
   HeapWord* adr_head() {
@@ -53,12 +52,12 @@
   void   set_head(oop o) {
     if (UseCompressedOops) {
       // Must compress the head ptr.
-      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
+      _compressed_head = oopDesc::encode_heap_oop(o);
     } else {
       _oop_head = o;
     }
   }
-  bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
+  bool   empty() const          { return head() == NULL; }
   size_t length()               { return _len; }
   void   set_length(size_t len) { _len = len;  }
   void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
@@ -76,21 +75,9 @@
 }
 
 void ReferenceProcessor::init_statics() {
-  assert(_sentinelRef == NULL, "should be initialized precisely once");
-  EXCEPTION_MARK;
-  _sentinelRef = instanceKlass::cast(
-                    SystemDictionary::Reference_klass())->
-                      allocate_permanent_instance(THREAD);
-
   // Initialize the master soft ref clock.
   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
 
-  if (HAS_PENDING_EXCEPTION) {
-      Handle ex(THREAD, PENDING_EXCEPTION);
-      vm_exit_during_initialization(ex);
-  }
-  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
-         "Just constructed it!");
   _always_clear_soft_ref_policy = new AlwaysClearPolicy();
   _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                       NOT_COMPILER2(LRUCurrentHeapPolicy());
@@ -130,10 +117,9 @@
   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
-  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
-  // Initialized all entries to _sentinelRef
+  // Initialize all entries to NULL
   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
-        _discoveredSoftRefs[i].set_head(sentinel_ref());
+    _discoveredSoftRefs[i].set_head(NULL);
     _discoveredSoftRefs[i].set_length(0);
   }
  // If we do barriers, cache a copy of the barrier set.
@@ -167,10 +153,6 @@
   }
 }
 
-void ReferenceProcessor::oops_do(OopClosure* f) {
-  f->do_oop(adr_sentinel_ref());
-}
-
 void ReferenceProcessor::update_soft_ref_master_clock() {
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
@@ -283,8 +265,6 @@
   }
 #endif
   JNIHandles::weak_oops_do(is_alive, keep_alive);
-  // Finally remember to keep sentinel around
-  keep_alive->do_oop(adr_sentinel_ref());
   complete_gc->do_void();
 }
 
@@ -334,21 +314,22 @@
     gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                            INTPTR_FORMAT, (address)refs_list.head());
   }
-  oop obj = refs_list.head();
+
+  oop obj = NULL;
+  oop next = refs_list.head();
   // Walk down the list, copying the discovered field into
-  // the next field and clearing it (except for the last
-  // non-sentinel object which is treated specially to avoid
-  // confusion with an active reference).
-  while (obj != sentinel_ref()) {
+  // the next field and clearing it.
+  while (obj != next) {
+    obj = next;
     assert(obj->is_instanceRef(), "should be reference object");
-    oop next = java_lang_ref_Reference::discovered(obj);
+    next = java_lang_ref_Reference::discovered(obj);
     if (TraceReferenceGC && PrintGCDetails) {
       gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                              obj, next);
     }
     assert(java_lang_ref_Reference::next(obj) == NULL,
            "The reference should not be enqueued");
-    if (next == sentinel_ref()) {  // obj is last
+    if (next == obj) {  // obj is last
      // Swap refs_list into pending_list_addr and
       // set obj's next to what we read from pending_list_addr.
       oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
@@ -366,7 +347,6 @@
       java_lang_ref_Reference::set_next(obj, next);
     }
     java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
-    obj = next;
   }
 }
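
A standalone sketch of the rewritten walk above, with a hypothetical Ref node in place of oop: the loop advances an (obj, next) pair, and next == obj after reading the discovered field identifies the last element. The pending-list splice done by atomic_exchange_oop in the real code is simplified to a plain copy here.

struct Ref { Ref* discovered = nullptr; Ref* next = nullptr; };

// Copy each discovered link into the next field and clear it. When
// next == obj after the read, obj is the last element; that is where the
// real code swaps the chain onto the Java pending list.
void copy_discovered_to_next(Ref* head) {
  Ref* obj = nullptr;
  Ref* next = head;
  while (obj != next) {
    obj = next;
    next = obj->discovered;   // read the link before clearing it
    obj->next = next;
    obj->discovered = nullptr;
  }
}
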
 
@@ -376,10 +356,9 @@
   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                      DiscoveredList      discovered_refs[],
                      HeapWord*           pending_list_addr,
-                     oop                 sentinel_ref,
                      int                 n_queues)
     : EnqueueTask(ref_processor, discovered_refs,
-                  pending_list_addr, sentinel_ref, n_queues)
+                  pending_list_addr, n_queues)
   { }
 
   virtual void work(unsigned int work_id) {
@@ -396,7 +375,7 @@
          j++, index += _n_queues) {
       _ref_processor.enqueue_discovered_reflist(
         _refs_lists[index], _pending_list_addr);
-      _refs_lists[index].set_head(_sentinel_ref);
+      _refs_lists[index].set_head(NULL);
       _refs_lists[index].set_length(0);
     }
   }
@@ -408,13 +387,13 @@
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
-                           pending_list_addr, sentinel_ref(), _max_num_q);
+                           pending_list_addr, _max_num_q);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
     for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
-      _discoveredSoftRefs[i].set_head(sentinel_ref());
+      _discoveredSoftRefs[i].set_head(NULL);
       _discoveredSoftRefs[i].set_length(0);
     }
   }
@@ -428,7 +407,7 @@
                                 BoolObjectClosure* is_alive);
 
   // End Of List.
-  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
+  inline bool has_next() const { return _ref != NULL; }
 
   // Get oop to the Reference object.
   inline oop obj() const { return _ref; }
@@ -468,9 +447,13 @@
   inline void update_discovered() {
     // First _prev_next ref actually points into DiscoveredList (gross).
     if (UseCompressedOops) {
-      _keep_alive->do_oop((narrowOop*)_prev_next);
+      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
+        _keep_alive->do_oop((narrowOop*)_prev_next);
+      }
     } else {
-      _keep_alive->do_oop((oop*)_prev_next);
+      if (!oopDesc::is_null(*(oop*)_prev_next)) {
+        _keep_alive->do_oop((oop*)_prev_next);
+      }
     }
   }
 
@@ -488,6 +471,7 @@
 private:
   DiscoveredList&    _refs_list;
   HeapWord*          _prev_next;
+  oop                _prev;
   oop                _ref;
   HeapWord*          _discovered_addr;
   oop                _next;
@@ -509,6 +493,7 @@
                                                       BoolObjectClosure* is_alive)
   : _refs_list(refs_list),
     _prev_next(refs_list.adr_head()),
+    _prev(NULL),
     _ref(refs_list.head()),
 #ifdef ASSERT
     _first_seen(refs_list.head()),
@@ -517,7 +502,7 @@
     _processed(0),
     _removed(0),
 #endif
-    _next(refs_list.head()),
+    _next(NULL),
     _keep_alive(keep_alive),
     _is_alive(is_alive)
 { }
@@ -544,26 +529,43 @@
 
 inline void DiscoveredListIterator::next() {
   _prev_next = _discovered_addr;
+  _prev = _ref;
   move_to_next();
 }
 
 inline void DiscoveredListIterator::remove() {
   assert(_ref->is_oop(), "Dropping a bad reference");
   oop_store_raw(_discovered_addr, NULL);
+
   // First _prev_next ref actually points into DiscoveredList (gross).
+  oop new_next;
+  if (_next == _ref) {
+    // At the end of the list, we should make _prev point to itself.
+    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
+    // and _prev will be NULL.
+    new_next = _prev;
+  } else {
+    new_next = _next;
+  }
+
   if (UseCompressedOops) {
     // Remove Reference object from list.
-    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
+    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
   } else {
     // Remove Reference object from list.
-    oopDesc::store_heap_oop((oop*)_prev_next, _next);
+    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
   }
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
 }
 
 inline void DiscoveredListIterator::move_to_next() {
-  _ref = _next;
+  if (_ref == _next) {
+    // End of the list.
+    _ref = NULL;
+  } else {
+    _ref = _next;
+  }
   assert(_ref != _first_seen, "cyclic ref_list found");
   NOT_PRODUCT(_processed++);
 }
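
The delicate case in DiscoveredListIterator::remove() above is unlinking the tail: its predecessor must become the new self-looped tail, and removing the sole element stores NULL into the head slot, which is why the _not_null store variants had to go. A standalone sketch under those assumptions, with a hypothetical Ref type and an explicit prev argument in place of the iterator's _prev/_prev_next state:

#include <cassert>

struct Ref { Ref* discovered = nullptr; };

// prev_next points at the link leading to ref: the list's head slot when
// ref is first (prev == nullptr), otherwise &prev->discovered.
void remove_from_list(Ref** prev_next, Ref* prev, Ref* ref) {
  Ref* next = ref->discovered;
  Ref* new_next;
  if (next == ref) {
    // ref was the tail: prev becomes the new self-looped tail, or the
    // list becomes empty (NULL head) when ref was also the first element.
    new_next = prev;
  } else {
    new_next = next;
  }
  *prev_next = new_next;      // may legitimately store NULL into the head
  ref->discovered = nullptr;  // unlink ref completely
}

int main() {
  Ref a, b;
  Ref* head = &b;
  b.discovered = &a;          // b -> a -> a (a is the self-looped tail)
  a.discovered = &a;
  remove_from_list(&b.discovered, &b, &a);   // remove the tail
  assert(b.discovered == &b);                // b now self-loops
  remove_from_list(&head, nullptr, &b);      // remove the sole element
  assert(head == nullptr);                   // list is empty again
  return 0;
}
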
@@ -725,24 +727,30 @@
     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
     iter.next();
   }
-  // Remember to keep sentinel pointer around
+  // Remember to update the next pointer of the last ref.
   iter.update_discovered();
   // Close the reachable set
   complete_gc->do_void();
 }
 
 void
-ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
-  oop obj = refs_list.head();
-  while (obj != sentinel_ref()) {
-    oop discovered = java_lang_ref_Reference::discovered(obj);
+ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
+  oop obj = NULL;
+  oop next = refs_list.head();
+  while (next != obj) {
+    obj = next;
+    next = java_lang_ref_Reference::discovered(obj);
     java_lang_ref_Reference::set_discovered_raw(obj, NULL);
-    obj = discovered;
   }
-  refs_list.set_head(sentinel_ref());
+  refs_list.set_head(NULL);
   refs_list.set_length(0);
 }
 
+void
+ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
+  clear_discovered_references(refs_list);
+}
+
 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
@@ -859,6 +867,9 @@
           refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                               avg_refs - ref_lists[to_idx].length());
         }
+
+        assert(refs_to_move > 0, "otherwise the code below will fail");
+
         oop move_head = ref_lists[from_idx].head();
         oop move_tail = move_head;
         oop new_head  = move_head;
@@ -867,10 +878,24 @@
           move_tail = new_head;
           new_head = java_lang_ref_Reference::discovered(new_head);
         }
-        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
+
+        // Add the chain to the to list.
+        if (ref_lists[to_idx].head() == NULL) {
+          // to list is empty. Make a loop at the end.
+          java_lang_ref_Reference::set_discovered(move_tail, move_tail);
+        } else {
+          java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
+        }
         ref_lists[to_idx].set_head(move_head);
         ref_lists[to_idx].inc_length(refs_to_move);
-        ref_lists[from_idx].set_head(new_head);
+
+        // Remove the chain from the from list.
+        if (move_tail == new_head) {
+          // We found the end of the from list.
+          ref_lists[from_idx].set_head(NULL);
+        } else {
+          ref_lists[from_idx].set_head(new_head);
+        }
         ref_lists[from_idx].dec_length(refs_to_move);
         if (ref_lists[from_idx].length() == 0) {
           break;
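
The balancing code above has to maintain the self-loop invariant on both lists: splicing onto an empty destination makes the moved tail the new self-looped end, and move_tail == new_head signals that the whole source list was consumed. A standalone sketch, with a hypothetical Ref type and k standing in for refs_to_move:

#include <cassert>

struct Ref { Ref* discovered = nullptr; };

// Move the first k (> 0) elements of one self-looped list onto another.
void move_chain(Ref*& from_head, Ref*& to_head, int k) {
  assert(k > 0 && from_head != nullptr);   // mirrors refs_to_move > 0 above
  Ref* move_head = from_head;
  Ref* move_tail = move_head;
  Ref* new_head  = move_head;
  for (int i = 0; i < k; ++i) {
    move_tail = new_head;
    new_head  = move_tail->discovered;
  }
  // Attach to the destination: an empty to-list makes move_tail the new
  // self-looped tail instead of pointing it at a NULL head.
  move_tail->discovered = (to_head != nullptr) ? to_head : move_tail;
  to_head = move_head;
  // Detach from the source: move_tail == new_head means the walk hit the
  // old tail's self-loop, i.e. the whole from-list was consumed.
  from_head = (move_tail != new_head) ? new_head : nullptr;
}
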
@@ -1082,6 +1107,8 @@
   // First we must make sure this object is only enqueued once. CAS in a non null
   // discovered_addr.
   oop current_head = refs_list.head();
+  // The last ref must have its discovered field pointing to itself.
+  oop next_discovered = (current_head != NULL) ? current_head : obj;
 
   // Note: In the case of G1, this specific pre-barrier is strictly
   // not necessary because the only case we are interested in
@@ -1091,13 +1118,13 @@
   // collector that might have need for a pre-barrier here.
   if (_discovered_list_needs_barrier && !UseG1GC) {
     if (UseCompressedOops) {
-      _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
+      _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
     } else {
-      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
+      _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
     }
     guarantee(false, "Need to check non-G1 collector");
   }
-  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
+  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                     NULL);
   if (retest == NULL) {
     // This thread just won the right to enqueue the object.
@@ -1106,7 +1133,7 @@
     refs_list.set_head(obj);
     refs_list.inc_length(1);
     if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, current_head);
+      _bs->write_ref_field((void*)discovered_addr, next_discovered);
     }
 
     if (TraceReferenceGC) {
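
In the MT-discovery path above, threads race to CAS next_discovered (the current list head, or the object itself when the list is empty) into the Reference's discovered field. A simplified sketch using std::atomic in place of oopDesc::atomic_compare_exchange_oop; hypothetical types, a single shared head, and none of the per-queue lists or barrier-set writes of the real code:

#include <atomic>

struct Ref { std::atomic<Ref*> discovered{nullptr}; };

// Threads race to install a link in obj's discovered field; only the CAS
// winner prepends obj to the list. next_discovered is the current head, or
// obj itself when the list is empty, preserving the self-loop invariant.
bool try_discover(Ref*& head, Ref* obj) {
  Ref* next_discovered = (head != nullptr) ? head : obj;
  Ref* expected = nullptr;
  if (obj->discovered.compare_exchange_strong(expected, next_discovered)) {
    head = obj;     // we won: obj becomes the new list head
    return true;
  }
  return false;     // another thread discovered obj first
}
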
@@ -1262,20 +1289,23 @@
     // here: the field will be visited later when processing the discovered
     // references.
     oop current_head = list->head();
+    // The last ref must have its discovered field pointing to itself.
+    oop next_discovered = (current_head != NULL) ? current_head : obj;
+
     // As in the case further above, since we are over-writing a NULL
     // pre-value, we can safely elide the pre-barrier here for the case of G1.
     assert(discovered == NULL, "control point invariant");
     if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
       if (UseCompressedOops) {
-        _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
+        _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
       } else {
-        _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
+        _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
       }
       guarantee(false, "Need to check non-G1 collector");
     }
-    oop_store_raw(discovered_addr, current_head);
+    oop_store_raw(discovered_addr, next_discovered);
     if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, current_head);
+      _bs->write_ref_field((void*)discovered_addr, next_discovered);
     }
     list->set_head(obj);
     list->inc_length(1);
@@ -1437,22 +1467,12 @@
 }
 #endif
 
-void ReferenceProcessor::verify() {
-  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
-}
-
 #ifndef PRODUCT
 void ReferenceProcessor::clear_discovered_references() {
   guarantee(!_discovering_refs, "Discovering refs?");
   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
-    oop obj = _discoveredSoftRefs[i].head();
-    while (obj != sentinel_ref()) {
-      oop next = java_lang_ref_Reference::discovered(obj);
-      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
-      obj = next;
-    }
-    _discoveredSoftRefs[i].set_head(sentinel_ref());
-    _discoveredSoftRefs[i].set_length(0);
+    clear_discovered_references(_discoveredSoftRefs[i]);
   }
 }
+
 #endif // PRODUCT
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Thu Sep 01 16:18:17 2011 +0200
@@ -52,8 +52,6 @@
 
 class ReferenceProcessor : public CHeapObj {
  protected:
-  // End of list marker
-  static oop  _sentinelRef;
   MemRegion   _span; // (right-open) interval of heap
                      // subject to wkref discovery
   bool        _discovering_refs;      // true when discovery enabled
@@ -106,8 +104,6 @@
   int max_num_q()                        { return _max_num_q; }
   void set_active_mt_degree(int v)       { _num_q = v; }
   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
-  static oop  sentinel_ref()             { return _sentinelRef; }
-  static oop* adr_sentinel_ref()         { return &_sentinelRef; }
   ReferencePolicy* setup_policy(bool always_clear) {
     _current_soft_ref_policy = always_clear ?
       _always_clear_soft_ref_policy : _default_soft_ref_policy;
@@ -230,6 +226,7 @@
                                         HeapWord* discovered_addr);
   void verify_ok_to_handle_reflists() PRODUCT_RETURN;
 
+  void clear_discovered_references(DiscoveredList& refs_list);
   void abandon_partial_discovered_list(DiscoveredList& refs_list);
 
   // Calculate the number of jni handles.
@@ -314,7 +311,6 @@
 
   // iterate over oops
   void weak_oops_do(OopClosure* f);       // weak roots
-  static void oops_do(OopClosure* f);     // strong root(s)
 
   // Balance each of the discovered lists.
   void balance_all_queues();
@@ -340,7 +336,6 @@
   // debugging
   void verify_no_references_recorded() PRODUCT_RETURN;
   void verify_referent(oop obj)        PRODUCT_RETURN;
-  static void verify();
 
   // clear the discovered lists (unlinking each entry).
   void clear_discovered_references() PRODUCT_RETURN;
@@ -524,12 +519,10 @@
   EnqueueTask(ReferenceProcessor& ref_processor,
               DiscoveredList      refs_lists[],
               HeapWord*           pending_list_addr,
-              oop                 sentinel_ref,
               int                 n_queues)
     : _ref_processor(ref_processor),
       _refs_lists(refs_lists),
       _pending_list_addr(pending_list_addr),
-      _sentinel_ref(sentinel_ref),
       _n_queues(n_queues)
   { }
 
@@ -540,7 +533,6 @@
   ReferenceProcessor& _ref_processor;
   DiscoveredList*     _refs_lists;
   HeapWord*           _pending_list_addr;
-  oop                 _sentinel_ref;
   int                 _n_queues;
 };
 
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Tue Sep 06 21:03:51 2011 -0700
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Thu Sep 01 16:18:17 2011 +0200
@@ -146,7 +146,6 @@
   assert(_strong_roots_parity != 0, "must have called prologue code");
   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
     Universe::oops_do(roots);
-    ReferenceProcessor::oops_do(roots);
     // Consider perm-gen discovered lists to be strong.
     perm_gen()->ref_processor()->weak_oops_do(roots);
   }