Merge
author	iveresov
Wed, 26 Nov 2008 09:24:57 -0800
changeset 1608 0a375b5fb8e1
parent 1604 6d43194ed39c (current diff)
parent 1607 be7d05bc07b2 (diff)
child 1609 5eacab926e22
child 1610 5dddd195cc86
Merge
files	hotspot/src/share/vm/runtime/globals.hpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Wed Nov 26 09:24:57 2008 -0800
@@ -325,24 +325,30 @@
 // For objects in CMS generation, this closure marks
 // given objects (transitively) as being reachable/live.
 // This is currently used during the (weak) reference object
-// processing phase of the CMS final checkpoint step.
+// processing phase of the CMS final checkpoint step, as
+// well as during the concurrent precleaning of the discovered
+// reference lists.
 class CMSKeepAliveClosure: public OopClosure {
  private:
   CMSCollector* _collector;
   const MemRegion _span;
   CMSMarkStack* _mark_stack;
   CMSBitMap*    _bit_map;
+  bool          _concurrent_precleaning;
  protected:
   DO_OOP_WORK_DEFN
  public:
   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                      CMSBitMap* bit_map, CMSMarkStack* mark_stack):
+                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
+                      bool cpc):
     _collector(collector),
     _span(span),
     _bit_map(bit_map),
-    _mark_stack(mark_stack) {
+    _mark_stack(mark_stack),
+    _concurrent_precleaning(cpc) {
     assert(!_span.is_empty(), "Empty span could spell trouble");
   }
+  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
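
The hunk above threads a construction-time mode flag through CMSKeepAliveClosure so a single closure type can serve both the concurrent precleaning and stop-world remark paths. A minimal standalone sketch of that pattern follows; the names (ModeClosure, handle_overflow) are hypothetical, and the two branches merely echo the behaviors chosen in concurrentMarkSweepGeneration.cpp below.

#include <cstdio>

// Sketch only: one closure, constructed for either concurrent precleaning
// or final remark, branching on the immutable flag when its stack overflows.
class ModeClosure {
  const bool _concurrent_precleaning;  // set once, like _concurrent_precleaning above
 public:
  explicit ModeClosure(bool cpc) : _concurrent_precleaning(cpc) {}
  bool concurrent_precleaning() const { return _concurrent_precleaning; }
  void handle_overflow() {
    if (_concurrent_precleaning) {
      std::puts("preclean: dirty the mod union table; remark rescans later");
    } else {
      std::puts("remark: push the object on the global overflow list");
    }
  }
};

int main() {
  ModeClosure preclean(true /* preclean */), remark(false /* !preclean */);
  preclean.handle_overflow();
  remark.handle_overflow();
  return 0;
}
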
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -538,6 +538,7 @@
   _survivor_chunk_capacity(0), // -- ditto --
   _survivor_chunk_index(0),    // -- ditto --
   _ser_pmc_preclean_ovflw(0),
+  _ser_kac_preclean_ovflw(0),
   _ser_pmc_remark_ovflw(0),
   _par_pmc_remark_ovflw(0),
   _ser_kac_ovflw(0),
@@ -1960,6 +1961,7 @@
 
   ref_processor()->set_enqueuing_is_done(false);
   ref_processor()->enable_discovery();
+  ref_processor()->snap_policy(clear_all_soft_refs);
   // If an asynchronous collection finishes, the _modUnionTable is
   // all clear.  If we are assuming the collection from an asynchronous
   // collection, clear the _modUnionTable.
@@ -2383,6 +2385,9 @@
     Universe::verify(true);
   }
 
+  // Snapshot the soft reference policy to be used in this collection cycle.
+  ref_processor()->snap_policy(clear_all_soft_refs);
+
   bool init_mark_was_synchronous = false; // until proven otherwise
   while (_collectorState != Idling) {
     if (TraceCMSState) {
@@ -4388,10 +4393,10 @@
     CMSPrecleanRefsYieldClosure yield_cl(this);
     assert(rp->span().equals(_span), "Spans should be equal");
     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
-                                   &_markStack);
+                                   &_markStack, true /* preclean */);
     CMSDrainMarkingStackClosure complete_trace(this,
-                                  _span, &_markBitMap, &_markStack,
-                                  &keep_alive);
+                                   _span, &_markBitMap, &_markStack,
+                                   &keep_alive, true /* preclean */);
 
     // We don't want this step to interfere with a young
     // collection because we don't want to take CPU
@@ -4590,11 +4595,11 @@
     if (!dirtyRegion.is_empty()) {
       assert(numDirtyCards > 0, "consistency check");
       HeapWord* stop_point = NULL;
+      stopTimer();
+      CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+                               bitMapLock());
+      startTimer();
       {
-        stopTimer();
-        CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
-                                 bitMapLock());
-        startTimer();
         verify_work_stacks_empty();
         verify_overflow_empty();
         sample_eden();
@@ -4611,10 +4616,6 @@
         assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
                (_collectorState == AbortablePreclean && should_abort_preclean()),
                "Unparsable objects should only be in perm gen.");
-
-        stopTimer();
-        CMSTokenSyncWithLocks ts(true, bitMapLock());
-        startTimer();
         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
         if (should_abort_preclean()) {
           break; // out of preclean loop
@@ -4852,17 +4853,19 @@
   // recurrence of that condition.
   assert(_markStack.isEmpty(), "No grey objects");
   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
-                     _ser_kac_ovflw;
+                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
   if (ser_ovflw > 0) {
     if (PrintCMSStatistics != 0) {
       gclog_or_tty->print_cr("Marking stack overflow (benign) "
-        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
+        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
+        ", kac_preclean="SIZE_FORMAT")",
         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
-        _ser_kac_ovflw);
+        _ser_kac_ovflw, _ser_kac_preclean_ovflw);
     }
     _markStack.expand();
     _ser_pmc_remark_ovflw = 0;
     _ser_pmc_preclean_ovflw = 0;
+    _ser_kac_preclean_ovflw = 0;
     _ser_kac_ovflw = 0;
   }
   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
@@ -5675,40 +5678,29 @@
 
   ResourceMark rm;
   HandleMark   hm;
-  ReferencePolicy* soft_ref_policy;
-
-  assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
-  // Process weak references.
-  if (clear_all_soft_refs) {
-    soft_ref_policy = new AlwaysClearPolicy();
-  } else {
-#ifdef COMPILER2
-    soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-    soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-  }
-  verify_work_stacks_empty();
 
   ReferenceProcessor* rp = ref_processor();
   assert(rp->span().equals(_span), "Spans should be equal");
+  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
+  // Process weak references.
+  rp->snap_policy(clear_all_soft_refs);
+  verify_work_stacks_empty();
+
   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
-                                          &_markStack);
+                                          &_markStack, false /* !preclean */);
   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                 _span, &_markBitMap, &_markStack,
-                                &cmsKeepAliveClosure);
+                                &cmsKeepAliveClosure, false /* !preclean */);
   {
     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
     if (rp->processing_is_mt()) {
       CMSRefProcTaskExecutor task_executor(*this);
-      rp->process_discovered_references(soft_ref_policy,
-                                        &_is_alive_closure,
+      rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         &task_executor);
     } else {
-      rp->process_discovered_references(soft_ref_policy,
-                                        &_is_alive_closure,
+      rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         NULL);
@@ -6163,8 +6155,8 @@
 #endif
 
 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
-  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
-         "missing Printezis mark?");
+   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
+          "missing Printezis mark?");
   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
   size_t size = pointer_delta(nextOneAddr + 1, addr);
   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
@@ -8302,8 +8294,29 @@
       }
     )
     if (simulate_overflow || !_mark_stack->push(obj)) {
-      _collector->push_on_overflow_list(obj);
-      _collector->_ser_kac_ovflw++;
+      if (_concurrent_precleaning) {
+        // We dirty the overflown object and let the remark
+        // phase deal with it.
+        assert(_collector->overflow_list_is_empty(), "Error");
+        // In the case of object arrays, we need to dirty all of
+        // the cards that the object spans. No locking or atomics
+        // are needed since no one else can be mutating the mod union
+        // table.
+        if (obj->is_objArray()) {
+          size_t sz = obj->size();
+          HeapWord* end_card_addr =
+            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
+          MemRegion redirty_range = MemRegion(addr, end_card_addr);
+          assert(!redirty_range.is_empty(), "Arithmetical tautology");
+          _collector->_modUnionTable.mark_range(redirty_range);
+        } else {
+          _collector->_modUnionTable.mark(addr);
+        }
+        _collector->_ser_kac_preclean_ovflw++;
+      } else {
+        _collector->push_on_overflow_list(obj);
+        _collector->_ser_kac_ovflw++;
+      }
     }
   }
 }
@@ -8400,6 +8413,8 @@
 void CMSDrainMarkingStackClosure::do_void() {
   // the max number to take from overflow list at a time
   const size_t num = _mark_stack->capacity()/4;
+  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
+         "Overflow list should be NULL during concurrent phases");
   while (!_mark_stack->isEmpty() ||
          // if stack is empty, check the overflow list
          _collector->take_from_overflow_list(num, _mark_stack)) {
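
The new precleaning overflow path above dirties every mod-union-table card an object array spans, using round_to against CardTableModRefBS::card_size. A hedged, self-contained sketch of that card-rounding arithmetic follows; it works in bytes for simplicity (the VM code does the equivalent arithmetic in HeapWords), with HotSpot's default 512-byte card and made-up addresses.

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t card_size = 512;        // bytes per card; HotSpot default
  const uintptr_t addr      = 0x10000038; // hypothetical objArray start
  const uintptr_t sz        = 5000;       // hypothetical size in bytes
  // round_to: align the one-past-end address up to the next card boundary,
  // so the dirtied range covers every card the array overlaps.
  uintptr_t end_card_addr = (addr + sz + card_size - 1) & ~(card_size - 1);
  std::printf("redirty range: [%#lx, %#lx)\n",
              (unsigned long)addr, (unsigned long)end_card_addr);
  return 0;
}
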
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Nov 26 09:24:57 2008 -0800
@@ -592,6 +592,7 @@
   size_t        _ser_pmc_preclean_ovflw;
   size_t        _ser_pmc_remark_ovflw;
   size_t        _par_pmc_remark_ovflw;
+  size_t        _ser_kac_preclean_ovflw;
   size_t        _ser_kac_ovflw;
   size_t        _par_kac_ovflw;
   NOT_PRODUCT(size_t _num_par_pushes;)
@@ -1749,21 +1750,30 @@
 // work-routine/closure used to complete transitive
 // marking of objects as live after a certain point
 // in which an initial set has been completely accumulated.
+// This closure is currently used both during the final
+// remark stop-world phase, as well as during the concurrent
+// precleaning of the discovered reference lists.
 class CMSDrainMarkingStackClosure: public VoidClosure {
   CMSCollector*        _collector;
   MemRegion            _span;
   CMSMarkStack*        _mark_stack;
   CMSBitMap*           _bit_map;
   CMSKeepAliveClosure* _keep_alive;
+  bool                 _concurrent_precleaning;
  public:
   CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      CMSKeepAliveClosure* keep_alive):
+                      CMSKeepAliveClosure* keep_alive,
+                      bool cpc):
     _collector(collector),
     _span(span),
     _bit_map(bit_map),
     _mark_stack(mark_stack),
-    _keep_alive(keep_alive) { }
+    _keep_alive(keep_alive),
+    _concurrent_precleaning(cpc) {
+    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
+           "Mismatch");
+  }
 
   void do_void();
 };
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -811,6 +811,7 @@
   ReferenceProcessor* rp = g1h->ref_processor();
   rp->verify_no_references_recorded();
   rp->enable_discovery(); // enable ("weak") refs discovery
+  rp->snap_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold);
@@ -1829,32 +1830,21 @@
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   ResourceMark rm;
   HandleMark   hm;
-  ReferencePolicy* soft_ref_policy;
+  G1CollectedHeap* g1h   = G1CollectedHeap::heap();
+  ReferenceProcessor* rp = g1h->ref_processor();
 
   // Process weak references.
-  if (clear_all_soft_refs) {
-    soft_ref_policy = new AlwaysClearPolicy();
-  } else {
-#ifdef COMPILER2
-    soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-    soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif
-  }
+  rp->snap_policy(clear_all_soft_refs);
   assert(_markStack.isEmpty(), "mark stack should be empty");
 
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  G1CMIsAliveClosure g1IsAliveClosure(g1);
-
-  G1CMKeepAliveClosure g1KeepAliveClosure(g1, this, nextMarkBitMap());
+  G1CMIsAliveClosure   g1IsAliveClosure  (g1h);
+  G1CMKeepAliveClosure g1KeepAliveClosure(g1h, this, nextMarkBitMap());
   G1CMDrainMarkingStackClosure
     g1DrainMarkingStackClosure(nextMarkBitMap(), &_markStack,
                                &g1KeepAliveClosure);
 
   // XXXYYY  Also: copy the parallel ref processing code from CMS.
-  ReferenceProcessor* rp = g1->ref_processor();
-  rp->process_discovered_references(soft_ref_policy,
-                                    &g1IsAliveClosure,
+  rp->process_discovered_references(&g1IsAliveClosure,
                                     &g1KeepAliveClosure,
                                     &g1DrainMarkingStackClosure,
                                     NULL);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -891,6 +891,7 @@
     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
 
     ref_processor()->enable_discovery();
+    ref_processor()->snap_policy(clear_all_soft_refs);
 
     // Do collection work
     {
@@ -2463,7 +2464,7 @@
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    // We want to turn off ref discovere, if necessary, and turn it back on
+    // We want to turn off ref discovery, if necessary, and turn it back on
     // on again later if we do.
     bool was_enabled = ref_processor()->discovery_enabled();
     if (was_enabled) ref_processor()->disable_discovery();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -33,8 +33,9 @@
 
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
+  assert(rp != NULL, "should be non-NULL");
   GenMarkSweep::_ref_processor = rp;
-  assert(rp != NULL, "should be non-NULL");
+  rp->snap_policy(clear_all_softrefs);
 
   // When collecting the permanent generation methodOops may be moving,
   // so we either have to flush all bcp data or convert it into bci.
@@ -121,23 +122,12 @@
                            &GenMarkSweep::follow_root_closure);
 
   // Process reference objects found during marking
-  ReferencePolicy *soft_ref_policy;
-  if (clear_all_softrefs) {
-    soft_ref_policy = new AlwaysClearPolicy();
-  } else {
-#ifdef COMPILER2
-    soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-    soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif
-  }
-  assert(soft_ref_policy != NULL,"No soft reference policy");
-  GenMarkSweep::ref_processor()->process_discovered_references(
-                                   soft_ref_policy,
-                                   &GenMarkSweep::is_alive,
-                                   &GenMarkSweep::keep_alive,
-                                   &GenMarkSweep::follow_stack_closure,
-                                   NULL);
+  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
+  rp->snap_policy(clear_all_softrefs);
+  rp->process_discovered_references(&GenMarkSweep::is_alive,
+                                    &GenMarkSweep::keep_alive,
+                                    &GenMarkSweep::follow_stack_closure,
+                                    NULL);
 
   // Follow system dictionary roots and unload classes
   bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -759,17 +759,12 @@
                thread_state_set.steals(),
                thread_state_set.pops()+thread_state_set.steals());
   }
-  assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(),
+  assert(thread_state_set.pushes() == thread_state_set.pops()
+                                    + thread_state_set.steals(),
          "Or else the queues are leaky.");
 
-  // For now, process discovered weak refs sequentially.
-#ifdef COMPILER2
-  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
   // Process (weak) reference objects found during scavenge.
+  ReferenceProcessor* rp = ref_processor();
   IsAliveClosure is_alive(this);
   ScanWeakRefClosure scan_weak_ref(this);
   KeepAliveClosure keep_alive(&scan_weak_ref);
@@ -778,18 +773,17 @@
   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
     &scan_without_gc_barrier, &scan_with_gc_barrier);
-  if (ref_processor()->processing_is_mt()) {
+  rp->snap_policy(clear_all_soft_refs);
+  if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    ref_processor()->process_discovered_references(
-        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
-        &task_executor);
+    rp->process_discovered_references(&is_alive, &keep_alive,
+                                      &evacuate_followers, &task_executor);
   } else {
     thread_state_set.flush();
     gch->set_par_threads(0);  // 0 ==> non-parallel.
     gch->save_marks();
-    ref_processor()->process_discovered_references(
-      soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
-      NULL);
+    rp->process_discovered_references(&is_alive, &keep_alive,
+                                      &evacuate_followers, NULL);
   }
   if (!promotion_failed()) {
     // Swap the survivor spaces.
@@ -851,14 +845,14 @@
 
   SpecializationStats::print();
 
-  ref_processor()->set_enqueuing_is_done(true);
-  if (ref_processor()->processing_is_mt()) {
+  rp->set_enqueuing_is_done(true);
+  if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    ref_processor()->enqueue_discovered_references(&task_executor);
+    rp->enqueue_discovered_references(&task_executor);
   } else {
-    ref_processor()->enqueue_discovered_references(NULL);
+    rp->enqueue_discovered_references(NULL);
   }
-  ref_processor()->verify_no_references_recorded();
+  rp->verify_no_references_recorded();
 }
 
 static int sum;
@@ -1211,7 +1205,7 @@
   int n = 0;
   while (cur != NULL) {
     oop obj_to_push = cur->forwardee();
-    oop next        = oop(cur->klass());
+    oop next        = oop(cur->klass_or_null());
     cur->set_klass(obj_to_push->klass());
     if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
       obj_to_push = cur;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -172,6 +172,7 @@
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
     ref_processor()->enable_discovery();
+    ref_processor()->snap_policy(clear_all_softrefs);
 
     mark_sweep_phase1(clear_all_softrefs);
 
@@ -517,20 +518,9 @@
 
   // Process reference objects found during marking
   {
-    ReferencePolicy *soft_ref_policy;
-    if (clear_all_softrefs) {
-      soft_ref_policy = new AlwaysClearPolicy();
-    } else {
-#ifdef COMPILER2
-      soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-      soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-    }
-    assert(soft_ref_policy != NULL,"No soft reference policy");
+    ref_processor()->snap_policy(clear_all_softrefs);
     ref_processor()->process_discovered_references(
-      soft_ref_policy, is_alive_closure(), mark_and_push_closure(),
-      follow_stack_closure(), NULL);
+      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
   }
 
   // Follow system dictionary roots and unload classes
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -1578,6 +1578,7 @@
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
     ref_processor()->enable_discovery();
+    ref_processor()->snap_policy(maximum_heap_compaction);
 
     bool marked_for_unloading = false;
 
@@ -1894,26 +1895,14 @@
   // Process reference objects found during marking
   {
     TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
-    ReferencePolicy *soft_ref_policy;
-    if (maximum_heap_compaction) {
-      soft_ref_policy = new AlwaysClearPolicy();
-    } else {
-#ifdef COMPILER2
-      soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-      soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-    }
-    assert(soft_ref_policy != NULL, "No soft reference policy");
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       ref_processor()->process_discovered_references(
-        soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
-        &follow_stack_closure, &task_executor);
+        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
+        &task_executor);
     } else {
       ref_processor()->process_discovered_references(
-        soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
-        &follow_stack_closure, NULL);
+        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
     }
   }
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -330,6 +330,7 @@
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
     reference_processor()->enable_discovery();
+    reference_processor()->snap_policy(false);
 
     // We track how much was promoted to the next generation for
     // the AdaptiveSizePolicy.
@@ -394,24 +395,16 @@
 
     // Process reference objects discovered during scavenge
     {
-#ifdef COMPILER2
-      ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-      ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
+      reference_processor()->snap_policy(false); // not always_clear
       PSKeepAliveClosure keep_alive(promotion_manager);
       PSEvacuateFollowersClosure evac_followers(promotion_manager);
-      assert(soft_ref_policy != NULL,"No soft reference policy");
       if (reference_processor()->processing_is_mt()) {
         PSRefProcTaskExecutor task_executor;
         reference_processor()->process_discovered_references(
-          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
-          &task_executor);
+          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
       } else {
         reference_processor()->process_discovered_references(
-          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
-          NULL);
+          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
       }
     }
 
--- a/hotspot/src/share/vm/includeDB_core	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/includeDB_core	Wed Nov 26 09:24:57 2008 -0800
@@ -3434,6 +3434,7 @@
 referenceProcessor.cpp                  systemDictionary.hpp
 
 referenceProcessor.hpp                  instanceRefKlass.hpp
+referenceProcessor.hpp                  referencePolicy.hpp
 
 reflection.cpp                          arguments.hpp
 reflection.cpp                          handles.inline.hpp
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -540,14 +540,6 @@
   assert(gch->no_allocs_since_save_marks(0),
          "save marks have not been newly set.");
 
-  // Weak refs.
-  // FIXME: Are these storage leaks, or are they resource objects?
-#ifdef COMPILER2
-  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
   // Not very pretty.
   CollectorPolicy* cp = gch->collector_policy();
 
@@ -574,8 +566,10 @@
   evacuate_followers.do_void();
 
   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
-  ref_processor()->process_discovered_references(
-    soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
+  ReferenceProcessor* rp = ref_processor();
+  rp->snap_policy(clear_all_soft_refs);
+  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
+                                    NULL);
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -525,8 +525,9 @@
           if (rp->discovery_is_atomic()) {
             rp->verify_no_references_recorded();
             rp->enable_discovery();
+            rp->snap_policy(clear_all_soft_refs);
           } else {
-            // collect() will enable discovery as appropriate
+            // collect() below will enable discovery as appropriate
           }
           _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
           if (!rp->enqueuing_is_done()) {
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -31,8 +31,9 @@
 
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
+  assert(rp != NULL, "should be non-NULL");
   _ref_processor = rp;
-  assert(rp != NULL, "should be non-NULL");
+  rp->snap_policy(clear_all_softrefs);
 
   TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
 
@@ -245,20 +246,9 @@
 
   // Process reference objects found during marking
   {
-    ReferencePolicy *soft_ref_policy;
-    if (clear_all_softrefs) {
-      soft_ref_policy = new AlwaysClearPolicy();
-    } else {
-#ifdef COMPILER2
-      soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-      soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-    }
-    assert(soft_ref_policy != NULL,"No soft reference policy");
+    ref_processor()->snap_policy(clear_all_softrefs);
     ref_processor()->process_discovered_references(
-      soft_ref_policy, &is_alive, &keep_alive,
-      &follow_stack_closure, NULL);
+      &is_alive, &keep_alive, &follow_stack_closure, NULL);
   }
 
   // Follow system dictionary roots and unload classes
--- a/hotspot/src/share/vm/memory/referencePolicy.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/memory/referencePolicy.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -26,6 +26,11 @@
 # include "incls/_referencePolicy.cpp.incl"
 
 LRUCurrentHeapPolicy::LRUCurrentHeapPolicy() {
+  snap();
+}
+
+// Capture state (of-the-VM) information needed to evaluate the policy
+void LRUCurrentHeapPolicy::snap() {
   _max_interval = (Universe::get_heap_free_at_last_gc() / M) * SoftRefLRUPolicyMSPerMB;
   assert(_max_interval >= 0,"Sanity check");
 }
@@ -47,6 +52,11 @@
 /////////////////////// MaxHeap //////////////////////
 
 LRUMaxHeapPolicy::LRUMaxHeapPolicy() {
+  snap();
+}
+
+// Capture state (of-the-VM) information needed to evaluate the policy
+void LRUMaxHeapPolicy::snap() {
   size_t max_heap = MaxHeapSize;
   max_heap -= Universe::get_heap_used_at_last_gc();
   max_heap /= M;
--- a/hotspot/src/share/vm/memory/referencePolicy.hpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/memory/referencePolicy.hpp	Wed Nov 26 09:24:57 2008 -0800
@@ -26,9 +26,11 @@
 // should be cleared.
 
 
-class ReferencePolicy : public ResourceObj {
+class ReferencePolicy : public CHeapObj {
  public:
   virtual bool should_clear_reference(oop p)       { ShouldNotReachHere(); return true; }
+  // Capture state (of-the-VM) information needed to evaluate the policy
+  virtual void snap() { /* do nothing */ }
 };
 
 class NeverClearPolicy : public ReferencePolicy {
@@ -48,6 +50,8 @@
  public:
   LRUCurrentHeapPolicy();
 
+  // Capture state (of-the-VM) information needed to evaluate the policy
+  void snap();
   bool should_clear_reference(oop p);
 };
 
@@ -58,5 +62,7 @@
  public:
   LRUMaxHeapPolicy();
 
+  // Capture state (of-the-VM) information needed to evaluate the policy
+  void snap();
   bool should_clear_reference(oop p);
 };
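
With policies now CHeapObj instances that live across collections, the constructor/snap() split above lets the same object re-capture VM state once per cycle instead of being reallocated each time. A hedged sketch of that shape, with stand-in constants for Universe::get_heap_free_at_last_gc() and SoftRefLRUPolicyMSPerMB:

// Sketch only: heap_free_mb and ms_per_mb stand in for real VM state.
struct PolicySketch {
  virtual void snap() {}                       // capture per-cycle state
  virtual bool should_clear(long age_ms) { return true; }
  virtual ~PolicySketch() {}
};

struct LRUSketch : PolicySketch {
  long _max_interval_ms;
  LRUSketch() { snap(); }                      // initial snapshot, as above
  void snap() {
    const long heap_free_mb = 1024;            // stand-in VM measurement
    const long ms_per_mb    = 1000;            // stand-in tuning flag
    _max_interval_ms = heap_free_mb * ms_per_mb;
  }
  bool should_clear(long age_ms) { return age_ms > _max_interval_ms; }
};
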
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -25,6 +25,11 @@
 # include "incls/_precompiled.incl"
 # include "incls/_referenceProcessor.cpp.incl"
 
+ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
+ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
+oop              ReferenceProcessor::_sentinelRef = NULL;
+const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
+
 // List of discovered references.
 class DiscoveredList {
 public:
@@ -47,7 +52,9 @@
   }
   bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
   size_t length()               { return _len; }
-  void   set_length(size_t len) { _len = len; }
+  void   set_length(size_t len) { _len = len;  }
+  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
+  void   dec_length(size_t dec) { _len -= dec; }
 private:
   // Set value depending on UseCompressedOops. This could be a template class
   // but then we have to fix all the instantiations and declarations that use this class.
@@ -56,10 +63,6 @@
   size_t _len;
 };
 
-oop  ReferenceProcessor::_sentinelRef = NULL;
-
-const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
-
 void referenceProcessor_init() {
   ReferenceProcessor::init_statics();
 }
@@ -80,6 +83,12 @@
   }
   assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
          "Just constructed it!");
+  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
+  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
+                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
+  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
+    vm_exit_during_initialization("Could not allocate reference policy object");
+  }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
             "Unrecongnized RefDiscoveryPolicy");
@@ -106,6 +115,7 @@
     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   }
   rp->set_is_alive_non_header(is_alive_non_header);
+  rp->snap_policy(false /* default soft ref policy */);
   return rp;
 }
 
@@ -192,7 +202,6 @@
 }
 
 void ReferenceProcessor::process_discovered_references(
-  ReferencePolicy*             policy,
   BoolObjectClosure*           is_alive,
   OopClosure*                  keep_alive,
   VoidClosure*                 complete_gc,
@@ -207,7 +216,7 @@
   // Soft references
   {
     TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
-    process_discovered_reflist(_discoveredSoftRefs, policy, true,
+    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
 
@@ -436,13 +445,13 @@
   // The "allow_null_referent" argument tells us to allow for the possibility
   // of a NULL referent in the discovered Reference object. This typically
   // happens in the case of concurrent collectors that may have done the
-  // discovery concurrently or interleaved with mutator execution.
+  // discovery concurrently, or interleaved, with mutator execution.
   inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
 
   // Move to the next discovered reference.
   inline void next();
 
-  // Remove the current reference from the list and move to the next.
+  // Remove the current reference from the list
   inline void remove();
 
   // Make the Reference object active again.
@@ -476,7 +485,6 @@
   inline size_t removed() const   { return _removed; }
   )
 
-private:
   inline void move_to_next();
 
 private:
@@ -553,7 +561,7 @@
     oopDesc::store_heap_oop((oop*)_prev_next, _next);
   }
   NOT_PRODUCT(_removed++);
-  move_to_next();
+  _refs_list.dec_length(1);
 }
 
 inline void DiscoveredListIterator::move_to_next() {
@@ -591,12 +599,13 @@
         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
                                iter.obj(), iter.obj()->blueprint()->internal_name());
       }
+      // Remove Reference object from list
+      iter.remove();
       // Make the Reference object active again
       iter.make_active();
       // keep the referent around
       iter.make_referent_alive();
-      // Remove Reference object from list
-      iter.remove();
+      iter.move_to_next();
     } else {
       iter.next();
     }
@@ -629,12 +638,13 @@
                                iter.obj(), iter.obj()->blueprint()->internal_name());
       }
       // The referent is reachable after all.
+      // Remove Reference object from list.
+      iter.remove();
       // Update the referent pointer as necessary: Note that this
       // should not entail any recursive marking because the
       // referent must already have been traversed.
       iter.make_referent_alive();
-      // Remove Reference object from list
-      iter.remove();
+      iter.move_to_next();
     } else {
       iter.next();
     }
@@ -670,6 +680,7 @@
       } else {
         keep_alive->do_oop((oop*)next_addr);
       }
+      iter.move_to_next();
     } else {
       iter.next();
     }
@@ -832,9 +843,9 @@
         }
         java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
         ref_lists[to_idx].set_head(move_head);
-        ref_lists[to_idx].set_length(ref_lists[to_idx].length() + refs_to_move);
+        ref_lists[to_idx].inc_length(refs_to_move);
         ref_lists[from_idx].set_head(new_head);
-        ref_lists[from_idx].set_length(ref_lists[from_idx].length() - refs_to_move);
+        ref_lists[from_idx].dec_length(refs_to_move);
       } else {
         ++to_idx;
       }
@@ -923,7 +934,6 @@
 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   assert(!discovery_is_atomic(), "Else why call this method?");
   DiscoveredListIterator iter(refs_list, NULL, NULL);
-  size_t length = refs_list.length();
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());
@@ -941,12 +951,11 @@
       )
       // Remove Reference object from list
       iter.remove();
-      --length;
+      iter.move_to_next();
     } else {
       iter.next();
     }
   }
-  refs_list.set_length(length);
   NOT_PRODUCT(
     if (PrintGCDetails && TraceReferenceGC) {
       gclog_or_tty->print(
@@ -1024,7 +1033,7 @@
     // We have separate lists for enqueueing so no synchronization
     // is necessary.
     refs_list.set_head(obj);
-    refs_list.set_length(refs_list.length() + 1);
+    refs_list.inc_length(1);
     if (_discovered_list_needs_barrier) {
       _bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
     }
@@ -1090,15 +1099,28 @@
   // reachable.
   if (is_alive_non_header() != NULL) {
     oop referent = java_lang_ref_Reference::referent(obj);
-    // We'd like to assert the following:
-    // assert(referent != NULL, "Refs with null referents already filtered");
-    // However, since this code may be executed concurrently with
-    // mutators, which can clear() the referent, it is not
-    // guaranteed that the referent is non-NULL.
+    // In the case of non-concurrent discovery, the last
+    // disjunct below should hold. It may not hold in the
+    // case of concurrent discovery because mutators may
+    // concurrently clear() a Reference.
+    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
+           "Refs with null referents already filtered");
     if (is_alive_non_header()->do_object_b(referent)) {
       return false;  // referent is reachable
     }
   }
+  if (rt == REF_SOFT) {
+    // For soft refs we can decide now if these are not
+    // current candidates for clearing, in which case we
+    // can mark through them now, rather than delaying that
+    // to the reference-processing phase. Since all current
+    // time-stamp policies advance the soft-ref clock only
+    // at a major collection cycle, this is always currently
+    // accurate.
+    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
+      return false;
+    }
+  }
 
   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
   const oop  discovered = java_lang_ref_Reference::discovered(obj);
@@ -1168,7 +1190,7 @@
       _bs->write_ref_field((oop*)discovered_addr, current_head);
     }
     list->set_head(obj);
-    list->set_length(list->length() + 1);
+    list->inc_length(1);
   }
 
   // In the MT discovery case, it is currently possible to see
@@ -1209,45 +1231,48 @@
     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }
 
   // Weak references
   {
     TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }
 
   // Final references
   {
     TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }
 
   // Phantom references
   {
     TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
@@ -1256,9 +1281,12 @@
 
 // Walk the given discovered ref list, and remove all reference objects
 // whose referents are still alive, whose referents are NULL or which
-// are not active (have a non-NULL next field). NOTE: For this to work
-// correctly, refs discovery can not be happening concurrently with this
-// step.
+// are not active (have a non-NULL next field). NOTE: When we are
+// thus precleaning the ref lists (which happens single-threaded today),
+// we do not disable refs discovery to honour the correct semantics of
+// java.lang.Reference. As a result, we need to be careful below
+// that ref removal steps interleave safely with ref discovery steps
+// (in this thread).
 void
 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                 BoolObjectClosure* is_alive,
@@ -1266,7 +1294,6 @@
                                                 VoidClosure*       complete_gc,
                                                 YieldClosure*      yield) {
   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
-  size_t length = refs_list.length();
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop obj = iter.obj();
@@ -1281,7 +1308,6 @@
       }
       // Remove Reference object from list
       iter.remove();
-      --length;
       // Keep alive its cohort.
       iter.make_referent_alive();
       if (UseCompressedOops) {
@@ -1291,12 +1317,11 @@
         oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
         keep_alive->do_oop(next_addr);
       }
+      iter.move_to_next();
     } else {
       iter.next();
     }
   }
-  refs_list.set_length(length);
-
   // Close the reachable set
   complete_gc->do_void();
 
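
Several hunks above change the deletion protocol of DiscoveredListIterator: remove() now only unlinks the current Reference and fixes the list length via dec_length(), and each caller advances explicitly with move_to_next(), which retires the hand-maintained per-loop length counters. A hedged, self-contained sketch of that unlink-then-advance discipline over a plain singly linked list (all names illustrative):

#include <cassert>
#include <cstddef>

struct Node { int v; Node* next; };

struct IterSketch {
  Node**  _prev_next;  // the link that currently points at _cur
  Node*   _cur;
  size_t* _len;
  bool has_next() const { return _cur != NULL; }
  void remove() {                     // unlink current; does NOT advance
    *_prev_next = _cur->next;
    --*_len;                          // analogous to refs_list.dec_length(1)
  }
  void move_to_next() { _cur = *_prev_next; }  // caller advances explicitly
  void next() { _prev_next = &_cur->next; _cur = _cur->next; }
};

int main() {
  Node c = {3, NULL}, b = {2, &c}, a = {1, &b};
  Node* head = &a;
  size_t len = 3;
  IterSketch it = { &head, head, &len };
  while (it.has_next()) {
    if (it._cur->v == 2) { it.remove(); it.move_to_next(); }
    else                 { it.next(); }
  }
  assert(len == 2 && a.next == &c);   // 2 unlinked, length stayed exact
  return 0;
}
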
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Wed Nov 26 09:24:57 2008 -0800
@@ -23,7 +23,7 @@
  */
 
 // ReferenceProcessor class encapsulates the per-"collector" processing
-// of "weak" references for GC. The interface is useful for supporting
+// of java.lang.Reference objects for GC. The interface is useful for supporting
 // a generational abstraction, in particular when there are multiple
 // generations that are being independently collected -- possibly
 // concurrently and/or incrementally.  Note, however, that the
@@ -75,6 +75,14 @@
   // all collectors but the CMS collector).
   BoolObjectClosure* _is_alive_non_header;
 
+  // Soft ref clearing policies
+  // . the default policy
+  static ReferencePolicy*   _default_soft_ref_policy;
+  // . the "clear all" policy
+  static ReferencePolicy*   _always_clear_soft_ref_policy;
+  // . the current policy below is either one of the above
+  ReferencePolicy*          _current_soft_ref_policy;
+
   // The discovered ref lists themselves
 
   // The MT'ness degree of the queues below
@@ -90,6 +98,12 @@
   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
   static oop  sentinel_ref()             { return _sentinelRef; }
   static oop* adr_sentinel_ref()         { return &_sentinelRef; }
+  ReferencePolicy* snap_policy(bool always_clear) {
+    _current_soft_ref_policy = always_clear ?
+      _always_clear_soft_ref_policy : _default_soft_ref_policy;
+    _current_soft_ref_policy->snap();   // snapshot the policy threshold
+    return _current_soft_ref_policy;
+  }
 
  public:
   // Process references with a certain reachability level.
@@ -297,8 +311,7 @@
   bool discover_reference(oop obj, ReferenceType rt);
 
   // Process references found during GC (called by the garbage collector)
-  void process_discovered_references(ReferencePolicy*             policy,
-                                     BoolObjectClosure*           is_alive,
+  void process_discovered_references(BoolObjectClosure*           is_alive,
                                      OopClosure*                  keep_alive,
                                      VoidClosure*                 complete_gc,
                                      AbstractRefProcTaskExecutor* task_executor);
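
Taken together, these header changes make snap_policy() the single point where a cycle chooses between the static always-clear and default policies, replacing the per-collector "new policy" boilerplate deleted throughout this changeset. A hedged mock of that selection logic (RefProcSketch and PolicyMock are illustrative; the real statics are set up in init_statics(), and the sketch ignores deallocation since those statics live for the VM's lifetime):

#include <cstdio>

struct PolicyMock {
  virtual void snap() { std::puts("snapshot policy threshold"); }
  virtual ~PolicyMock() {}
};

struct RefProcSketch {
  PolicyMock* _always_clear;    // like _always_clear_soft_ref_policy
  PolicyMock* _default_policy;  // like _default_soft_ref_policy
  PolicyMock* _current;
  RefProcSketch()
    : _always_clear(new PolicyMock()),
      _default_policy(new PolicyMock()),
      _current(_default_policy) {}
  PolicyMock* snap_policy(bool always_clear) {
    _current = always_clear ? _always_clear : _default_policy;
    _current->snap();           // one snapshot per collection cycle
    return _current;
  }
};

int main() {
  RefProcSketch rp;
  rp.snap_policy(false);  // scavenge / concurrent cycle: default policy
  rp.snap_policy(true);   // clear_all_soft_refs full GC: always-clear
  return 0;
}
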
--- a/hotspot/src/share/vm/memory/universe.cpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/memory/universe.cpp	Wed Nov 26 09:24:57 2008 -0800
@@ -96,7 +96,7 @@
 bool            Universe::_fully_initialized = false;
 
 size_t          Universe::_heap_capacity_at_last_gc;
-size_t          Universe::_heap_used_at_last_gc;
+size_t          Universe::_heap_used_at_last_gc = 0;
 
 CollectedHeap*  Universe::_collectedHeap = NULL;
 address         Universe::_heap_base = NULL;
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Wed Nov 26 09:24:57 2008 -0800
@@ -92,7 +92,7 @@
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
   if (UseCompressedOops) {
-    _metadata._compressed_klass = encode_heap_oop_not_null(k);
+    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
   } else {
     _metadata._klass = (klassOop)k;
   }
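
The switch from encode_heap_oop_not_null to encode_heap_oop above exists because, during ParNew overflow-list handling (see the klass_or_null() change in parNewGeneration.cpp), the klass slot temporarily holds a "next" pointer that may be NULL. A hedged sketch of why the null-tolerant compressed-oop encoding matters (the base and shift values are illustrative, not HotSpot's actual configuration):

#include <cassert>
#include <cstdint>

typedef uint32_t narrowOop;
static const uintptr_t heap_base = 0x100000000ULL;  // hypothetical heap base

// Null-tolerant encode: NULL must map to 0, not to (0 - base) >> shift.
narrowOop encode_heap_oop(uintptr_t p) {
  return (p == 0) ? 0 : (narrowOop)((p - heap_base) >> 3);
}

int main() {
  assert(encode_heap_oop(0) == 0);               // overflow-list NULL "next"
  assert(encode_heap_oop(heap_base + 64) == 8);  // ordinary in-heap pointer
  return 0;
}
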
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Nov 26 09:24:57 2008 -0800
@@ -1474,7 +1474,7 @@
           "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence"  \
           " ratio")                                                         \
                                                                             \
-  product(bool, CMSPrecleanRefLists1, false,                                \
+  product(bool, CMSPrecleanRefLists1, true,                                 \
           "Preclean ref lists during (initial) preclean phase")             \
                                                                             \
   product(bool, CMSPrecleanRefLists2, false,                                \
--- a/hotspot/src/share/vm/utilities/macros.hpp	Tue Nov 25 15:59:23 2008 -0500
+++ b/hotspot/src/share/vm/utilities/macros.hpp	Wed Nov 26 09:24:57 2008 -0800
@@ -65,8 +65,10 @@
 // COMPILER2 variant
 #ifdef COMPILER2
 #define COMPILER2_PRESENT(code) code
+#define NOT_COMPILER2(code)
 #else // COMPILER2
 #define COMPILER2_PRESENT(code)
+#define NOT_COMPILER2(code) code
 #endif // COMPILER2
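
NOT_COMPILER2 is the complement of the existing COMPILER2_PRESENT: exactly one of the pair expands to its argument, giving a compile-time either/or. A self-contained sketch follows; the local #define COMPILER2 merely simulates a server (C2) build, and the policy names echo the default-policy selection in referenceProcessor.cpp above.

#include <cstdio>

#define COMPILER2  // hypothetical: pretend this is a COMPILER2 (server) build

#ifdef COMPILER2
#define COMPILER2_PRESENT(code) code
#define NOT_COMPILER2(code)
#else
#define COMPILER2_PRESENT(code)
#define NOT_COMPILER2(code) code
#endif

int main() {
  const char* policy = COMPILER2_PRESENT("LRUMaxHeapPolicy")
                           NOT_COMPILER2("LRUCurrentHeapPolicy");
  std::puts(policy);  // prints LRUMaxHeapPolicy in this simulated build
  return 0;
}
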