Merge
author    ysr
date      Tue, 26 Jan 2010 23:36:11 -0800
changeset 4743 fa9e9cc780e6
parent    4742 9b18850a3185
parent    4734 18210e18831a
child     4744 40fc0ab5cd15
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Jan 26 23:36:11 2010 -0800
@@ -718,10 +718,8 @@
       case BarrierSet::G1SATBCTLogging:
         {
           __ pusha();                      // push registers
-          __ push(count);
-          __ push(start);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
-          __ addptr(rsp, 2*wordSize);
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
+                          start, count);
           __ popa();
         }
         break;
@@ -752,10 +750,8 @@
       case BarrierSet::G1SATBCTLogging:
         {
           __ pusha();                      // push registers
-          __ push(count);
-          __ push(start);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
-          __ addptr(rsp, 2*wordSize);
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
+                          start, count);
           __ popa();
         }
         break;
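
Note on the two 32-bit hunks above: both replace a hand-rolled C call sequence with the assembler's call_VM_leaf wrapper. Side by side, with the before/after lines from the hunk annotated (the comments are editorial; that the wrapper handles argument marshalling and stack cleanup is an inference consistent with the removed addptr, not a quote from the wrapper's source):

    // Before: manual cdecl-style call. Arguments are pushed right to
    // left (count, then start), and the caller must pop both words
    // itself afterwards.
    __ push(count);
    __ push(start);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
    __ addptr(rsp, 2*wordSize);

    // After: one wrapper call. call_VM_leaf passes start and count per
    // the platform ABI and restores the stack itself, so the stub cannot
    // get the argument order or the cleanup size wrong.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
                    start, count);
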
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Jan 26 23:36:11 2010 -0800
@@ -1172,7 +1172,7 @@
             __ movptr(c_rarg0, addr);
             __ movptr(c_rarg1, count);
           }
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
           __ popa();
         }
         break;
@@ -1212,7 +1212,7 @@
           __ shrptr(scratch, LogBytesPerHeapOop);  // convert to element count
           __ mov(c_rarg0, start);
           __ mov(c_rarg1, scratch);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
           __ popa();
         }
         break;
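
The 64-bit hunks make the same substitution, but there the arguments already sit in the first two C argument registers, so call_VM_leaf is given only an argument count. A sketch assembled from the lines of the hunk above (editorial comments; the count-only overload is taken from the hunk itself):

    __ mov(c_rarg0, start);    // first C argument register
    __ mov(c_rarg1, scratch);  // second C argument register (element count)
    // Tell call_VM_leaf that two arguments are already in place; no
    // manual rsp adjustment is needed on either side of the call.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
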
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jan 26 23:36:11 2010 -0800
@@ -3655,9 +3655,7 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
   assert(_revisitStack.isEmpty(), "tabula rasa");
-
-  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
-
+  DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
   bool result = false;
   if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
     result = do_marking_mt(asynch);
@@ -4124,7 +4122,6 @@
 void CMSConcMarkingTask::coordinator_yield() {
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
-
   DEBUG_ONLY(RememberKlassesChecker mux(false);)
   // First give up the locks, then yield, then re-lock
   // We should probably use a constructor/destructor idiom to
@@ -4201,9 +4198,7 @@
   // Mutate the Refs discovery so it is MT during the
   // multi-threaded marking phase.
   ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
-
-  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
-
+  DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
   conc_workers()->start_task(&tsk);
   while (tsk.yielded()) {
     tsk.coordinator_yield();
@@ -4472,7 +4467,7 @@
     // for cleaner interfaces.
     rp->preclean_discovered_references(
           rp->is_alive_non_header(), &keep_alive, &complete_trace,
-          &yield_cl);
+          &yield_cl, should_unload_classes());
   }
 
   if (clean_survivor) {  // preclean the active survivor space(s)
@@ -4494,7 +4489,7 @@
     SurvivorSpacePrecleanClosure
       sss_cl(this, _span, &_markBitMap, &_markStack,
              &pam_cl, before_count, CMSYield);
-    DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
+    DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
     dng->from()->object_iterate_careful(&sss_cl);
     dng->to()->object_iterate_careful(&sss_cl);
   }
@@ -4665,7 +4660,7 @@
         verify_work_stacks_empty();
         verify_overflow_empty();
         sample_eden();
-        DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
+        DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
         stop_point =
           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
@@ -4753,7 +4748,7 @@
       sample_eden();
       verify_work_stacks_empty();
       verify_overflow_empty();
-      DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
+      DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
       HeapWord* stop_point =
         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       if (stop_point != NULL) {
@@ -4853,7 +4848,7 @@
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
-  DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
+  DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
   if (!init_mark_was_synchronous) {
     // We might assume that we need not fill TLAB's when
     // CMSScavengeBeforeRemark is set, because we may have just done
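
Every CMS hunk above makes the same substitution: the static command-line flag CMSClassUnloadingEnabled is replaced by the per-cycle decision should_unload_classes(). As the comment added to iterator.hpp later in this patch explains, CMS also unloads classes when ExplicitGCInvokesConcurrentAndUnloadsClasses is set and the current collection is explicit, so the per-cycle query is the correct input for the checker. A minimal standalone sketch of that decision (a hypothetical free function, not the HotSpot member):

    // Illustrative only: CMS's per-cycle class-unloading decision is
    // broader than the CMSClassUnloadingEnabled flag alone.
    bool should_unload_classes(bool cms_class_unloading_enabled,
                               bool explicit_gc_unloads_classes,
                               bool is_explicit_collection) {
      return cms_class_unloading_enabled ||
             (explicit_gc_unloads_classes && is_explicit_collection);
    }
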
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Jan 26 23:36:11 2010 -0800
@@ -1004,7 +1004,12 @@
   // storage in the heap comes from a young region or not.
   // See ReduceInitialCardMarks.
   virtual bool can_elide_tlab_store_barriers() const {
-    return true;
+    // 6920090: Temporarily disabled, because of lingering
+    // instabilities related to RICM with G1. In the
+    // interim, the option ReduceInitialCardMarksForG1
+    // below is left solely as a debugging device at least
+    // until 6920109 fixes the instabilities.
+    return ReduceInitialCardMarksForG1;
   }
 
   virtual bool card_mark_must_follow_store() const {
@@ -1026,6 +1031,8 @@
   // However, non-generational G1 (-XX:-G1Gen) appears to have
   // bit-rotted so was not tested below.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
+    // Re 6920090, 6920109 above.
+    assert(ReduceInitialCardMarksForG1, "Else cannot be here");
     assert(G1Gen || !is_in_young(new_obj),
            "Non-generational G1 should never return true below");
     return is_in_young(new_obj);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Jan 26 23:36:11 2010 -0800
@@ -291,6 +291,10 @@
           "a particular entry exceeds this value.")                         \
                                                                             \
   develop(bool, G1VerifyCTCleanup, false,                                   \
-          "Verify card table cleanup.")
+          "Verify card table cleanup.")                                     \
+                                                                            \
+  develop(bool, ReduceInitialCardMarksForG1, false,                         \
+          "When ReduceInitialCardMarks is true, this flag setting "         \
+          " controls whether G1 allows the RICM optimization")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Tue Jan 26 23:36:11 2010 -0800
@@ -66,7 +66,8 @@
   // Used for ReduceInitialCardMarks (when COMPILER2 is used);
   // otherwise remains unused.
#ifdef COMPILER2
-  _defer_initial_card_mark = ReduceInitialCardMarks && (DeferInitialCardMark || card_mark_must_follow_store());
+  _defer_initial_card_mark =    ReduceInitialCardMarks && can_elide_tlab_store_barriers()
+                             && (DeferInitialCardMark || card_mark_must_follow_store());
 #else
   assert(_defer_initial_card_mark == false, "Who would set it?");
 #endif
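
This hunk adds can_elide_tlab_store_barriers() as a third conjunct, so deferred initial card marks are only armed on heaps that can actually elide TLAB store barriers. Restated as a hypothetical free function for readability (not the HotSpot code):

    // Illustrative only: the predicate from the hunk, spelled out.
    bool defer_initial_card_mark(bool reduce_initial_card_marks,
                                 bool can_elide_tlab_store_barriers,
                                 bool defer_initial_card_mark_flag,
                                 bool card_mark_must_follow_store) {
      return reduce_initial_card_marks &&
             can_elide_tlab_store_barriers &&
             (defer_initial_card_mark_flag || card_mark_must_follow_store);
    }
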
--- a/hotspot/src/share/vm/memory/iterator.hpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Tue Jan 26 23:36:11 2010 -0800
@@ -296,23 +296,32 @@
 // RememberKlassesChecker can be passed "false" to turn off checking.
 // It is used by CMS when CMS yields to a different collector.
 class RememberKlassesChecker: StackObj {
- bool _state;
- bool _skip;
+ bool _saved_state;
+ bool _do_check;
  public:
-  RememberKlassesChecker(bool checking_on) : _state(false), _skip(false) {
-    _skip = !(ClassUnloading && !UseConcMarkSweepGC ||
-              CMSClassUnloadingEnabled && UseConcMarkSweepGC);
-    if (_skip) {
-      return;
+  RememberKlassesChecker(bool checking_on) : _saved_state(false),
+    _do_check(true) {
+    // The ClassUnloading flag affects all collectors
+    // except CMS.
+    // CMS unloads classes if CMSClassUnloadingEnabled is true or
+    // if ExplicitGCInvokesConcurrentAndUnloadsClasses is true and
+    // the current collection is an explicit collection.  Turning
+    // on the checking in general for
+    // ExplicitGCInvokesConcurrentAndUnloadsClasses and
+    // UseConcMarkSweepGC should not lead to false positives.
+    _do_check =
+      ClassUnloading && !UseConcMarkSweepGC ||
+      CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
+      ExplicitGCInvokesConcurrentAndUnloadsClasses && UseConcMarkSweepGC;
+    if (_do_check) {
+      _saved_state = OopClosure::must_remember_klasses();
+      OopClosure::set_must_remember_klasses(checking_on);
     }
-    _state = OopClosure::must_remember_klasses();
-    OopClosure::set_must_remember_klasses(checking_on);
   }
   ~RememberKlassesChecker() {
-    if (_skip) {
-      return;
+    if (_do_check) {
+      OopClosure::set_must_remember_klasses(_saved_state);
     }
-    OopClosure::set_must_remember_klasses(_state);
   }
 };
 #endif  // ASSERT
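
The rewritten RememberKlassesChecker is a standard StackObj save/restore guard: the constructor records the global state and overwrites it, and the destructor puts the saved value back, so guards nest correctly and early scope exits stay balanced. A self-contained sketch of the idiom outside HotSpot (FlagGuard and global_flag are hypothetical stand-ins, assumed for illustration):

    #include <cassert>

    // RAII guard: save a global flag on entry, restore it on scope exit.
    struct FlagGuard {
      static bool global_flag;   // stands in for OopClosure::must_remember_klasses()
      bool _saved_state;
      bool _do_check;
      FlagGuard(bool checking_on, bool enabled)
          : _saved_state(false), _do_check(enabled) {
        if (_do_check) {
          _saved_state = global_flag;   // remember the old value
          global_flag  = checking_on;   // impose the new one
        }
      }
      ~FlagGuard() {
        if (_do_check) {
          global_flag = _saved_state;   // restore only if we touched it
        }
      }
    };
    bool FlagGuard::global_flag = false;

    int main() {
      {
        FlagGuard outer(true, true);     // checking on
        assert(FlagGuard::global_flag);
        {
          FlagGuard inner(false, true);  // nested guard turns it off
          assert(!FlagGuard::global_flag);
        }                                // inner restores "on"
        assert(FlagGuard::global_flag);
      }                                  // outer restores "off"
      assert(!FlagGuard::global_flag);
      return 0;
    }

The renames in the hunk (_state to _saved_state, _skip to _do_check) also remove the double negation, so the destructor reads as "restore if we touched it" rather than "return unless skipped".
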
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Tue Jan 26 23:36:11 2010 -0800
@@ -1227,13 +1227,16 @@
   BoolObjectClosure* is_alive,
   OopClosure* keep_alive,
   VoidClosure* complete_gc,
-  YieldClosure* yield) {
+  YieldClosure* yield,
+  bool should_unload_classes) {
 
   NOT_PRODUCT(verify_ok_to_handle_reflists());
 
 #ifdef ASSERT
   bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
-                               CMSClassUnloadingEnabled && UseConcMarkSweepGC;
+                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
+                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
+                                 UseConcMarkSweepGC && should_unload_classes;
   RememberKlassesChecker mx(must_remember_klasses);
 #endif
   // Soft references
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Tue Jan 26 23:36:11 2010 -0800
@@ -170,11 +170,13 @@
   // The caller is responsible for taking care of potential
   // interference with concurrent operations on these lists
   // (or predicates involved) by other threads. Currently
-  // only used by the CMS collector.
+  // only used by the CMS collector.  should_unload_classes is
+  // used to aid assertion checking when classes are unloaded.
   void preclean_discovered_references(BoolObjectClosure* is_alive,
                                       OopClosure*        keep_alive,
                                       VoidClosure*       complete_gc,
-                                      YieldClosure*      yield);
+                                      YieldClosure*      yield,
+                                      bool               should_unload_classes);
 
   // Delete entries in the discovered lists that have
   // either a null referent or are not active. Such
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Jan 22 15:06:53 2010 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Tue Jan 26 23:36:11 2010 -0800
@@ -1201,7 +1201,7 @@
   product(bool, UseSerialGC, false,                                         \
           "Use the serial garbage collector")                               \
                                                                             \
-  experimental(bool, UseG1GC, false,                                        \
+  product(bool, UseG1GC, false,                                             \
           "Use the Garbage-First garbage collector")                        \
                                                                             \
   product(bool, UseParallelGC, false,                                       \
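
A closing note on the globals.hpp hunk: moving UseG1GC from the experimental() macro to product() means the flag no longer sits behind -XX:+UnlockExperimentalVMOptions; -XX:+UseG1GC by itself now selects the Garbage-First collector in product builds.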