hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
changeset 3262 30d1c247fc25
parent 3184 b92e28cc020b
child 3283 c8b1b0aecdfe
comparison: 3192:607a56c8880f vs 3262:30d1c247fc25
@@ -450,17 +450,14 @@
 
   _markStack.allocate(G1MarkStackSize);
   _regionStack.allocate(G1MarkRegionStackSize);
 
   // Create & start a ConcurrentMark thread.
-  if (G1ConcMark) {
-    _cmThread = new ConcurrentMarkThread(this);
-    assert(cmThread() != NULL, "CM Thread should have been created");
-    assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
-  } else {
-    _cmThread = NULL;
-  }
+  _cmThread = new ConcurrentMarkThread(this);
+  assert(cmThread() != NULL, "CM Thread should have been created");
+  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
+
   _g1h = G1CollectedHeap::heap();
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
   assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
   assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
 
@@ -781,22 +778,22 @@
   CMMarkRootsClosure(ConcurrentMark* cm,
                      G1CollectedHeap* g1h,
                      bool do_barrier) : _cm(cm), _g1h(g1h),
                                         _do_barrier(do_barrier) { }
 
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  virtual void do_oop(oop* p) {
-    oop thisOop = *p;
-    if (thisOop != NULL) {
-      assert(thisOop->is_oop() || thisOop->mark() == NULL,
-             "expected an oop, possibly with mark word displaced");
-      HeapWord* addr = (HeapWord*)thisOop;
-      if (_g1h->is_in_g1_reserved(addr)) {
-        _cm->grayRoot(thisOop);
-      }
-    }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      assert(obj->is_oop() || obj->mark() == NULL,
+             "expected an oop, possibly with mark word displaced");
+      HeapWord* addr = (HeapWord*)obj;
+      if (_g1h->is_in_g1_reserved(addr)) {
+        _cm->grayRoot(obj);
+      }
+    }
     if (_do_barrier) {
       assert(!_g1h->is_in_g1_reserved(p),
              "Should be called on external roots");
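
Every closure touched in this changeset follows the same compressed-oops pattern seen in the hunk above: both do_oop overloads forward to a single do_oop_work template, and the heap word is loaded and decoded through the oopDesc helpers instead of a plain *p. The following is a self-contained sketch of that dispatch idea only, using simplified stand-in types rather than HotSpot's real oop/narrowOop machinery:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  typedef uint32_t narrowOop;   // compressed reference: offset from a heap base
  typedef void*    oop;         // full-width reference

  static char heap_base[1 << 16];

  // Decode a compressed reference; a full-width one needs no decoding.
  static oop load(narrowOop* p) { return *p ? heap_base + *p : NULL; }
  static oop load(oop* p)       { return *p; }

  class OopClosure {
  public:
    virtual void do_oop(narrowOop* p) = 0;
    virtual void do_oop(oop* p)       = 0;
  };

  class CountLiveClosure : public OopClosure {
    int _count;
  public:
    CountLiveClosure() : _count(0) { }

    // Both overloads forward to one template, so narrow and wide slots
    // share a single body -- the shape used throughout the hunks here.
    virtual void do_oop(narrowOop* p) { do_oop_work(p); }
    virtual void do_oop(      oop* p) { do_oop_work(p); }

    template <class T> void do_oop_work(T* p) {
      oop obj = load(p);             // decodes only when T is narrowOop
      if (obj != NULL) _count++;
    }

    int count() const { return _count; }
  };

  int main() {
    narrowOop n = 64;               // compressed slot
    oop       w = heap_base + 128;  // full-width slot
    CountLiveClosure cl;
    cl.do_oop(&n);
    cl.do_oop(&w);
    std::printf("%d\n", cl.count()); // prints 2
    return 0;
  }

The same shape recurs below in G1CMKeepAliveClosure, ReachablePrinterOopClosure, CSMarkOopClosure, and the CMTask oop closure.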
@@ -847,20 +844,10 @@
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   double start = os::elapsedTime();
   GCOverheadReporter::recordSTWStart(start);
-
-  // If there has not been a GC[n-1] since last GC[n] cycle completed,
-  // precede our marking with a collection of all
-  // younger generations to keep floating garbage to a minimum.
-  // YSR: we won't do this for now -- it's an optimization to be
-  // done post-beta.
-
-  // YSR:    ignoring weak refs for now; will do at bug fixing stage
-  // EVM:    assert(discoveredRefsAreClear());
-
 
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
   g1p->record_concurrent_mark_init_start();
   checkpointRootsInitialPre();
 
@@ -1133,10 +1120,17 @@
   if (has_aborted()) {
     g1h->set_marking_complete(); // So bitmap clearing isn't confused
     return;
   }
 
+  if (VerifyDuringGC) {
+    HandleMark hm;  // handle scope
+    gclog_or_tty->print(" VerifyDuringGC:(before)");
+    Universe::heap()->prepare_for_verify();
+    Universe::verify(true, false, true);
+  }
+
   G1CollectorPolicy* g1p = g1h->g1_policy();
   g1p->record_concurrent_mark_remark_start();
 
   double start = os::elapsedTime();
   GCOverheadReporter::recordSTWStart(start);
@@ -1157,14 +1151,16 @@
   } else {
     // We're done with marking.
     JavaThread::satb_mark_queue_set().set_active_all_threads(false);
 
     if (VerifyDuringGC) {
-      g1h->prepare_for_verify();
-      g1h->verify(/* allow_dirty */      true,
-                  /* silent */           false,
-                  /* use_prev_marking */ false);
+      HandleMark hm;  // handle scope
+      gclog_or_tty->print(" VerifyDuringGC:(after)");
+      Universe::heap()->prepare_for_verify();
+      Universe::heap()->verify(/* allow_dirty */      true,
+                               /* silent */           false,
+                               /* use_prev_marking */ false);
     }
   }
 
 #if VERIFY_OBJS_PROCESSED
   _scan_obj_cl.objs_processed = 0;
@@ -1656,10 +1652,19 @@
   if (has_aborted()) {
     g1h->set_marking_complete(); // So bitmap clearing isn't confused
     return;
   }
 
+  if (VerifyDuringGC) {
+    HandleMark hm;  // handle scope
+    gclog_or_tty->print(" VerifyDuringGC:(before)");
+    Universe::heap()->prepare_for_verify();
+    Universe::verify(/* allow dirty  */ true,
+                     /* silent       */ false,
+                     /* prev marking */ true);
+  }
+
   _cleanup_co_tracker.disable();
 
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();
 
@@ -1788,14 +1793,16 @@
   // We need to make this be a "collection" so any collection pause that
   // races with it goes around and waits for completeCleanup to finish.
   g1h->increment_total_collections();
 
   if (VerifyDuringGC) {
-    g1h->prepare_for_verify();
-    g1h->verify(/* allow_dirty */      true,
-                /* silent */           false,
-                /* use_prev_marking */ true);
+    HandleMark hm;  // handle scope
+    gclog_or_tty->print(" VerifyDuringGC:(after)");
+    Universe::heap()->prepare_for_verify();
+    Universe::verify(/* allow dirty  */ true,
+                     /* silent       */ false,
+                     /* prev marking */ true);
   }
 }
 
 void ConcurrentMark::completeCleanup() {
   // A full collection intervened.
@@ -1850,16 +1857,15 @@
   G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                        CMBitMap* bitMap) :
     _g1(g1), _cm(cm),
     _bitMap(bitMap) {}
 
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  void do_oop(oop* p) {
-    oop thisOop = *p;
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
+    oop thisOop = oopDesc::load_decode_heap_oop(p);
     HeapWord* addr = (HeapWord*)thisOop;
     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(thisOop)) {
       _bitMap->mark(addr);
       _cm->mark_stack_push(thisOop);
     }
@@ -2014,16 +2020,15 @@
 
 public:
   ReachablePrinterOopClosure(CMBitMapRO* bitmap, outputStream* out) :
     _bitmap(bitmap), _g1h(G1CollectedHeap::heap()), _out(out) { }
 
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  void do_oop(oop* p) {
-    oop         obj = *p;
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
+    oop         obj = oopDesc::load_decode_heap_oop(p);
     const char* str = NULL;
     const char* str2 = "";
 
     if (!_g1h->is_in_g1_reserved(obj))
       str = "outside G1 reserved";
@@ -2161,10 +2166,11 @@
     gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT,
                            (void*) obj);
 
 
   HeapWord* objAddr = (HeapWord*) obj;
+  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
     tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
     HeapRegion* hr = _g1h->heap_region_containing(obj);
     if (_g1h->is_obj_ill(obj, hr)) {
       if (verbose_high())
@@ -2378,11 +2384,11 @@
       _ms_ind--;
       return _ms[_ms_ind];
     }
   }
 
-  bool drain() {
+  template <class T> bool drain() {
     while (_ms_ind > 0) {
       oop obj = pop();
       assert(obj != NULL, "Since index was non-zero.");
       if (obj->is_objArray()) {
         jint arr_ind = _array_ind_stack[_ms_ind];
@@ -2392,13 +2398,12 @@
         if (next_arr_ind < len) {
           push(obj, next_arr_ind);
         }
         // Now process this portion of this one.
         int lim = MIN2(next_arr_ind, len);
-        assert(!UseCompressedOops, "This needs to be fixed");
         for (int j = arr_ind; j < lim; j++) {
-          do_oop(aobj->obj_at_addr<oop>(j));
+          do_oop(aobj->obj_at_addr<T>(j));
         }
 
       } else {
         obj->oop_iterate(this);
       }
@@ -2421,17 +2426,17 @@
   ~CSMarkOopClosure() {
     FREE_C_HEAP_ARRAY(oop, _ms);
     FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
   }
 
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  void do_oop(oop* p) {
-    oop obj = *p;
-    if (obj == NULL) return;
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (oopDesc::is_null(heap_oop)) return;
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (obj->is_forwarded()) {
       // If the object has already been forwarded, we have to make sure
       // that it's marked.  So follow the forwarding pointer.  Note that
       // this does the right thing for self-forwarding pointers in the
       // evacuation failure case.
@@ -2476,11 +2481,15 @@
            "address out of range");
     assert(_bitMap->isMarked(addr), "tautology");
     oop obj = oop(addr);
     if (!obj->is_forwarded()) {
       if (!_oop_cl.push(obj)) return false;
-      if (!_oop_cl.drain()) return false;
+      if (UseCompressedOops) {
+        if (!_oop_cl.drain<narrowOop>()) return false;
+      } else {
+        if (!_oop_cl.drain<oop>()) return false;
+      }
     }
     // Otherwise...
     return true;
   }
 };
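
Because drain() is now a template parameterized on the slot type, the caller has to pick the instantiation from the UseCompressedOops flag, as the hunk above does. The following is a stand-alone sketch of why the slot type matters when walking an object array, with invented stand-in types (not HotSpot's):

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  typedef uint32_t narrowOop;
  typedef void*    oop;

  static bool UseCompressedOops = true;   // stand-in for the real VM flag

  static char heap_base[1 << 16];
  static oop  decode(narrowOop v) { return v ? heap_base + v : NULL; }
  static oop  decode(oop v)       { return v; }

  struct ArrayScanner {
    int _live;
    ArrayScanner() : _live(0) { }

    // Walk 'len' element slots starting at 'base', reading each slot as a T.
    // The slot width (and hence T) differs between compressed and wide heaps.
    template <class T> void drain(void* base, int len) {
      T* elems = (T*) base;
      for (int i = 0; i < len; i++) {
        if (decode(elems[i]) != NULL) _live++;
      }
    }
  };

  int main() {
    ArrayScanner sc;
    if (UseCompressedOops) {              // pick the instantiation at runtime,
      narrowOop elems[3] = { 8, 0, 16 };  // as the hunk above does
      sc.drain<narrowOop>(elems, 3);
    } else {
      oop elems[3] = { heap_base + 8, NULL, heap_base + 16 };
      sc.drain<oop>(elems, 3);
    }
    std::printf("%d\n", sc._live);        // prints 2 either way
    return 0;
  }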
@@ -2634,13 +2643,10 @@
   }
 }
 
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
-  // If we're not marking, nothing to do.
-  if (!G1ConcMark) return;
-
   // Clear all marks to force marking thread to do nothing
   _nextMarkBitMap->clearAll();
   // Empty mark stack
   clear_marking_state();
   for (int i = 0; i < (int)_max_task_num; ++i)
@@ -2812,18 +2818,18 @@
   G1CollectedHeap*   _g1h;
   ConcurrentMark*    _cm;
   CMTask*            _task;
 
 public:
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  void do_oop(oop* p) {
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
     tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant" );
+    tmp_guarantee_CM( !_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), "invariant" );
 
-    oop obj = *p;
+    oop obj = oopDesc::load_decode_heap_oop(p);
     if (_cm->verbose_high())
       gclog_or_tty->print_cr("[%d] we're looking at location "
                              "*"PTR_FORMAT" = "PTR_FORMAT,
                              _task->task_id(), p, (void*) obj);
     _task->deal_with_reference(obj);
@@ -2965,10 +2971,11 @@
                            _task_id, (void*) obj);
 
   ++_refs_reached;
 
   HeapWord* objAddr = (HeapWord*) obj;
+  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
     tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
     HeapRegion* hr =  _g1h->heap_region_containing(obj);
     if (_g1h->is_obj_ill(obj, hr)) {
       if (_cm->verbose_high())
@@ -3028,10 +3035,11 @@
 }
 
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   tmp_guarantee_CM( _g1h->is_in_g1_reserved(objAddr), "invariant" );
+  tmp_guarantee_CM( !_g1h->heap_region_containing(objAddr)->is_on_free_list(), "invariant" );
   tmp_guarantee_CM( !_g1h->is_obj_ill(obj), "invariant" );
   tmp_guarantee_CM( _nextMarkBitMap->isMarked(objAddr), "invariant" );
 
   if (_cm->verbose_high())
     gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
@@ -3272,10 +3280,12 @@
       if (_cm->verbose_high())
         gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
                                (void*) obj);
 
       tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) obj),
                         "invariant" );
+      tmp_guarantee_CM( !_g1h->heap_region_containing(obj)->is_on_free_list(),
+                        "invariant" );
 
       scan_object(obj);
 
       if (_task_queue->size() <= target_size || has_aborted())