hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
changeset 2154 72a9b7284ccf
parent 2105 347008ce7984
parent 2152 99356e7f31b1
child 2252 703d28e44a42
comparison: 2106:ec595a5e793e -> 2154:72a9b7284ccf
@@ -134,10 +134,18 @@
     return true;
   }
   int calls() { return _calls; }
 };
 
+class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
+public:
+  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+    *card_ptr = CardTableModRefBS::dirty_card_val();
+    return true;
+  }
+};
+
 YoungList::YoungList(G1CollectedHeap* g1h)
   : _g1h(g1h), _head(NULL),
     _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
     _length(0), _scan_only_length(0),
     _last_sampled_rs_lengths(0),
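
The closure added above is the replay half of the new G1DeferredRSUpdate
machinery: each card address logged on the deferred queue is simply made
dirty again, so the normal refinement path will pick it up and do the
real remembered-set work. A minimal usage sketch, lifted from the
evacuation path later in this changeset:

    RedirtyLoggedCardTableEntryFastClosure redirty;
    dirty_card_queue_set().set_closure(&redirty);
    dirty_card_queue_set().apply_closure_to_all_completed_buffers();
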
@@ -810,10 +818,44 @@
     }
     return false;
   }
 };
 
+class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
+  G1CollectedHeap*   _g1h;
+  UpdateRSOopClosure _cl;
+  int                _worker_i;
+public:
+  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
+    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
+    _worker_i(worker_i),
+    _g1h(g1)
+  { }
+  bool doHeapRegion(HeapRegion* r) {
+    if (!r->continuesHumongous()) {
+      _cl.set_from(r);
+      r->oop_iterate(&_cl);
+    }
+    return false;
+  }
+};
+
+class ParRebuildRSTask: public AbstractGangTask {
+  G1CollectedHeap* _g1;
+public:
+  ParRebuildRSTask(G1CollectedHeap* g1)
+    : AbstractGangTask("ParRebuildRSTask"),
+      _g1(g1)
+  { }
+
+  void work(int i) {
+    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
+    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
+                                         HeapRegion::RebuildRSClaimValue);
+  }
+};
+
 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                     size_t word_size) {
   ResourceMark rm;
 
   if (full && DisableExplicitGC) {
@@ -916,26 +958,37 @@
     }
     NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
 
     reset_gc_time_stamp();
     // Since everything potentially moved, we will clear all remembered
-    // sets, and clear all cards.  Later we will also cards in the used
-    // portion of the heap after the resizing (which could be a shrinking.)
-    // We will also reset the GC time stamps of the regions.
+    // sets, and clear all cards.  Later we will rebuild remembered
+    // sets. We will also reset the GC time stamps of the regions.
     PostMCRemSetClearClosure rs_clear(mr_bs());
     heap_region_iterate(&rs_clear);
 
     // Resize the heap if necessary.
     resize_if_necessary_after_full_collection(full ? 0 : word_size);
 
-    // Since everything potentially moved, we will clear all remembered
-    // sets, but also dirty all cards corresponding to used regions.
-    PostMCRemSetInvalidateClosure rs_invalidate(mr_bs());
-    heap_region_iterate(&rs_invalidate);
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
       _cg1r->clear_hot_cache();
+    }
+
+    // Rebuild remembered sets of all regions.
+    if (ParallelGCThreads > 0) {
+      ParRebuildRSTask rebuild_rs_task(this);
+      assert(check_heap_region_claim_values(
+             HeapRegion::InitialClaimValue), "sanity check");
+      set_par_threads(workers()->total_workers());
+      workers()->run_task(&rebuild_rs_task);
+      set_par_threads(0);
+      assert(check_heap_region_claim_values(
+             HeapRegion::RebuildRSClaimValue), "sanity check");
+      reset_heap_region_claim_values();
+    } else {
+      RebuildRSOutOfRegionClosure rebuild_rs(this);
+      heap_region_iterate(&rebuild_rs);
     }
 
     if (PrintGC) {
       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
     }
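
The parallel branch added above leans on G1's region-claiming protocol:
heap_region_par_iterate_chunked() hands out chunks of regions, and each
worker stamps the regions it processes with RebuildRSClaimValue, so no
region's remembered set is rebuilt twice; the asserts verify that every
region starts at InitialClaimValue and ends at RebuildRSClaimValue. In
outline, claiming amounts to one CAS per region (a hypothetical,
simplified helper for illustration, not the actual HeapRegion API):

    bool try_claim(HeapRegion* r, jint new_claim, jint expected) {
      // Exactly one worker wins the CAS and owns the region.
      return Atomic::cmpxchg(new_claim, r->claim_value_addr(), expected)
             == expected;
    }
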
@@ -959,11 +1012,12 @@
     // dirty-card logging system, some cards may be dirty by weak-ref
     // processing, and may be enqueued.  But the whole card table is
     // dirtied, so this should abandon those logs, and set "do_traversal"
     // to true.
     concurrent_g1_refine()->set_pya_restart();
-
+    assert(!G1DeferredRSUpdate
+           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
     assert(regions_accounted_for(), "Region leakage!");
   }
 
   if (g1_policy()->in_young_gc_mode()) {
     _young_list->reset_sampled_info();
@@ -1464,10 +1518,17 @@
     JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                   DirtyCardQ_FL_lock,
                                                   G1DirtyCardQueueMax,
                                                   Shared_DirtyCardQ_lock);
   }
+  if (G1DeferredRSUpdate) {
+    dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
+                                      DirtyCardQ_FL_lock,
+                                      0,
+                                      Shared_DirtyCardQ_lock,
+                                      &JavaThread::dirty_card_queue_set());
+  }
   // In case we're keeping closure specialization stats, initialize those
   // counts and that mechanism.
   SpecializationStats::clear();
 
   _gc_alloc_region_list = NULL;
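
The block added above creates a second, GC-private DirtyCardQueueSet for
the deferred updates. It reuses the same locks as the mutator set, and
the final argument appears to name JavaThread::dirty_card_queue_set() as
the buffer free-list owner, which is what makes the merge_bufferlists()
hand-off at the end of evacuation cheap. During the pause, deferred
cards are marked and enqueued on this set in the shape the
UpdateRSetDeferred closure (added further down) uses:

    DirtyCardQueue dcq(&dirty_card_queue_set());
    size_t card_index = ct_bs->index_for(p);      // p: the updated field
    if (ct_bs->mark_card_deferred(card_index)) {  // first time this card?
      dcq.enqueue((jbyte*)ct_bs->byte_for_index(card_index));
    }
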
@@ -2314,11 +2375,10 @@
 };
 
 void
 G1CollectedHeap::checkConcurrentMark() {
     VerifyMarkedObjsClosure verifycl(this);
-    doConcurrentMark();
     //    MutexLockerEx x(getMarkBitMapLock(),
     //              Mutex::_no_safepoint_check_flag);
     object_iterate(&verifycl);
 }
 
@@ -2491,11 +2551,11 @@
     g1_policy()->record_collection_pause_start(start_time_sec,
                                                start_used_bytes);
 
     guarantee(_in_cset_fast_test == NULL, "invariant");
     guarantee(_in_cset_fast_test_base == NULL, "invariant");
-    _in_cset_fast_test_length = n_regions();
+    _in_cset_fast_test_length = max_regions();
     _in_cset_fast_test_base =
                              NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
     memset(_in_cset_fast_test_base, false,
                                      _in_cset_fast_test_length * sizeof(bool));
     // We're biasing _in_cset_fast_test to avoid subtracting the
@@ -2511,11 +2571,11 @@
     if (g1_policy()->should_initiate_conc_mark()) {
       concurrent_mark()->checkpointRootsInitialPre();
     }
     save_marks();
 
-    // We must do this before any possible evacuation that should propogate
+    // We must do this before any possible evacuation that should propagate
     // marks, including evacuation of popular objects in a popular pause.
     if (mark_in_progress()) {
       double start_time_sec = os::elapsedTime();
 
       _cm->drainAllSATBBuffers();
@@ -2624,13 +2684,12 @@
 #if SCAN_ONLY_VERBOSE
     _young_list->print();
 #endif // SCAN_ONLY_VERBOSE
 
     double end_time_sec = os::elapsedTime();
-    if (!evacuation_failed()) {
-      g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
-    }
+    double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
+    g1_policy()->record_pause_time_ms(pause_time_ms);
     GCOverheadReporter::recordSTWEnd(end_time_sec);
     g1_policy()->record_collection_pause_end(popular_region != NULL,
                                              abandoned);
 
     assert(regions_accounted_for(), "Region leakage.");
@@ -2917,43 +2976,67 @@
 #endif // G1_DEBUG
     }
   }
 };
 
-class RecreateRSetEntriesClosure: public OopClosure {
+class UpdateRSetImmediate : public OopsInHeapRegionClosure {
 private:
   G1CollectedHeap* _g1;
   G1RemSet* _g1_rem_set;
-  HeapRegion* _from;
 public:
-  RecreateRSetEntriesClosure(G1CollectedHeap* g1, HeapRegion* from) :
-    _g1(g1), _g1_rem_set(g1->g1_rem_set()), _from(from)
-  {}
+  UpdateRSetImmediate(G1CollectedHeap* g1) :
+    _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
 
   void do_oop(narrowOop* p) {
     guarantee(false, "NYI");
   }
   void do_oop(oop* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (*p != NULL) {
-      _g1_rem_set->write_ref(_from, p);
+    if (*p != NULL && !_from->is_survivor()) {
+      _g1_rem_set->par_write_ref(_from, p, 0);
     }
   }
 };
+
+class UpdateRSetDeferred : public OopsInHeapRegionClosure {
+private:
+  G1CollectedHeap* _g1;
+  DirtyCardQueue *_dcq;
+  CardTableModRefBS* _ct_bs;
+
+public:
+  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
+    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
+
+  void do_oop(narrowOop* p) {
+    guarantee(false, "NYI");
+  }
+  void do_oop(oop* p) {
+    assert(_from->is_in_reserved(p), "paranoia");
+    if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
+      size_t card_index = _ct_bs->index_for(p);
+      if (_ct_bs->mark_card_deferred(card_index)) {
+        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+      }
+    }
+  }
+};
+
+
 
 class RemoveSelfPointerClosure: public ObjectClosure {
 private:
   G1CollectedHeap* _g1;
   ConcurrentMark* _cm;
   HeapRegion* _hr;
   size_t _prev_marked_bytes;
   size_t _next_marked_bytes;
+  OopsInHeapRegionClosure *_cl;
 public:
-  RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr) :
-    _g1(g1), _cm(_g1->concurrent_mark()), _hr(hr),
-    _prev_marked_bytes(0), _next_marked_bytes(0)
-  {}
+  RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
+    _g1(g1), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
+    _next_marked_bytes(0), _cl(cl) {}
 
   size_t prev_marked_bytes() { return _prev_marked_bytes; }
   size_t next_marked_bytes() { return _next_marked_bytes; }
 
   // The original idea here was to coalesce evacuated and dead objects.
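
Taken together, UpdateRSetImmediate and UpdateRSetDeferred are the two
strategies for repairing remembered sets of regions whose evacuation
failed: the immediate variant calls par_write_ref and updates the
remembered set on the spot, while the deferred variant only marks the
card "deferred" and logs it, leaving the real update to be replayed
after the pause. Both skip references whose source region is a survivor
(to-space) region, and the deferred one also skips same-region
references, which never need remembered-set entries. The common filter,
pulled out as a hypothetical helper for clarity (the patch inlines
these tests):

    static bool needs_rs_update(HeapRegion* from, oop* p) {
      return *p != NULL
          && !from->is_in_reserved(*p)  // only cross-region references
          && !from->is_survivor();      // to-space sources are skipped
    }
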
@@ -2987,12 +3070,11 @@
       // card in the collection set and coming across an array that
       // was being chunked and looking malformed. The problem is
       // that, if evacuation fails, we might have remembered set
       // entries missing given that we skipped cards on the
       // collection set. So, we'll recreate such entries now.
-      RecreateRSetEntriesClosure cl(_g1, _hr);
-      obj->oop_iterate(&cl);
+      obj->oop_iterate(_cl);
       assert(_cm->isPrevMarked(obj), "Should be marked!");
     } else {
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
       MemRegion mr((HeapWord*)obj, obj->size());
@@ -3001,18 +3083,27 @@
     }
   }
 };
 
 void G1CollectedHeap::remove_self_forwarding_pointers() {
+  UpdateRSetImmediate immediate_update(_g1h);
+  DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
+  UpdateRSetDeferred deferred_update(_g1h, &dcq);
+  OopsInHeapRegionClosure *cl;
+  if (G1DeferredRSUpdate) {
+    cl = &deferred_update;
+  } else {
+    cl = &immediate_update;
+  }
   HeapRegion* cur = g1_policy()->collection_set();
-
   while (cur != NULL) {
     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
 
+    RemoveSelfPointerClosure rspc(_g1h, cl);
     if (cur->evacuation_failed()) {
-      RemoveSelfPointerClosure rspc(_g1h, cur);
       assert(cur->in_collection_set(), "bad CS");
+      cl->set_region(cur);
       cur->object_iterate(&rspc);
 
       // A number of manipulations to make the TAMS be the current top,
       // and the marked bytes be the ones observed in the iteration.
       if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
@@ -3517,10 +3608,13 @@
 
 class G1ParScanThreadState : public StackObj {
 protected:
   G1CollectedHeap* _g1h;
   RefToScanQueue*  _refs;
+  DirtyCardQueue   _dcq;
+  CardTableModRefBS* _ct_bs;
+  G1RemSet* _g1_rem;
 
   typedef GrowableArray<oop*> OverflowQueue;
   OverflowQueue* _overflowed_refs;
 
   G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
@@ -3558,14 +3652,36 @@
 
   void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
 
   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 
+  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
+  CardTableModRefBS* ctbs()                      { return _ct_bs; }
+
+  void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
+    _g1_rem->par_write_ref(from, p, tid);
+  }
+
+  void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
+    // If the new value of the field points to the same region or
+    // is the to-space, we don't need to include it in the Rset updates.
+    if (!from->is_in_reserved(*p) && !from->is_survivor()) {
+      size_t card_index = ctbs()->index_for(p);
+      // If the card hasn't been added to the buffer, do it.
+      if (ctbs()->mark_card_deferred(card_index)) {
+        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+      }
+    }
+  }
+
 public:
   G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
     : _g1h(g1h),
       _refs(g1h->task_queue(queue_num)),
+      _dcq(&g1h->dirty_card_queue_set()),
+      _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
+      _g1_rem(g1h->g1_rem_set()),
       _hash_seed(17), _queue_num(queue_num),
       _term_attempts(0),
       _age_table(false),
 #if G1_DETAILED_STATS
       _pushes(0), _pops(0), _steals(0),
@@ -3639,10 +3755,18 @@
   }
 
   int refs_to_scan()                             { return refs()->size();                 }
   int overflowed_refs_to_scan()                  { return overflowed_refs()->length();    }
 
+  void update_rs(HeapRegion* from, oop* p, int tid) {
+    if (G1DeferredRSUpdate) {
+      deferred_rs_update(from, p, tid);
+    } else {
+      immediate_rs_update(from, p, tid);
+    }
+  }
+
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
 
     HeapWord* obj = NULL;
     if (word_sz * 100 <
         (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) *
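
update_rs() is the choke point the rest of the patch funnels
remembered-set updates through: each worker's scan state decides, per
reference, whether to write the entry immediately or defer it to its
dirty card queue. The copy closures below change accordingly, from
writing through the remembered set directly to routing via the scan
state:

    // before:
    _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
    // after:
    _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
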
@@ -3807,11 +3931,10 @@
       }
     }
   }
 };
 
-
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }
 
 // This closure is applied to the fields of the objects that have just been copied.
@@ -3833,11 +3956,11 @@
       // problems before we go into push_on_queue to know where the
       // problem is coming from
       assert(obj == *p, "the value of *p should not have changed");
       _par_scan_state->push_on_queue(p);
     } else {
-      _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
+      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
     }
   }
 }
 
 void G1ParCopyHelper::mark_forwardee(oop* p) {
@@ -3971,17 +4094,17 @@
     } else {
       *p = copy_to_survivor_space(obj);
     }
     // When scanning the RS, we only care about objs in CS.
     if (barrier == G1BarrierRS) {
-      _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
+      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
     }
   }
 
   // When scanning moved objs, must look at all oops.
   if (barrier == G1BarrierEvac && obj != NULL) {
-    _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
+    _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
   }
 
   if (do_gen_barrier && obj != NULL) {
     par_do_barrier(p);
   }
@@ -4126,10 +4249,11 @@
     pss.set_partial_scan_closure(&partial_scan_cl);
 
     G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
     G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
     G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
+
     G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
     G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
     G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
 
     OopsInHeapRegionClosure        *scan_root_cl;
@@ -4381,20 +4505,20 @@
   set_evacuation_failed(false);
 
   g1_rem_set()->prepare_for_oops_into_collection_set_do();
   concurrent_g1_refine()->set_use_cache(false);
   int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
-
   set_par_threads(n_workers);
   G1ParTask g1_par_task(this, n_workers, _task_queues);
 
   init_for_evac_failure(NULL);
 
   change_strong_roots_parity();  // In preparation for parallel strong roots.
   rem_set()->prepare_for_younger_refs_iterate(true);
+
+  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par = os::elapsedTime();
-
   if (ParallelGCThreads > 0) {
     // The individual threads will set their evac-failure closures.
     workers()->run_task(&g1_par_task);
   } else {
     g1_par_task.work(0);
@@ -4410,28 +4534,35 @@
   {
     G1IsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
-
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
+
   concurrent_g1_refine()->set_use_cache(true);
 
   finalize_for_evac_failure();
 
   // Must do this before removing self-forwarding pointers, which clears
   // the per-region evac-failure flags.
   concurrent_mark()->complete_marking_in_collection_set();
 
   if (evacuation_failed()) {
     remove_self_forwarding_pointers();
-
     if (PrintGCDetails) {
       gclog_or_tty->print(" (evacuation failed)");
     } else if (PrintGC) {
       gclog_or_tty->print("--");
     }
+  }
+
+  if (G1DeferredRSUpdate) {
+    RedirtyLoggedCardTableEntryFastClosure redirty;
+    dirty_card_queue_set().set_closure(&redirty);
+    dirty_card_queue_set().apply_closure_to_all_completed_buffers();
+    JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
+    assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
   }
 
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }
 
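The block added at the end of the evacuation code above closes the
deferred-update loop. Read together with the earlier hunks, the life of
a deferred card is roughly (names from this patch; flow inferred from
the hunks above):

    // 1. While copying, a cross-region reference defers its card:
    //      ctbs()->mark_card_deferred(card_index);
    //      dirty_card_queue().enqueue(ctbs()->byte_for_index(card_index));
    // 2. After evacuation, every logged card is made plain dirty again:
    //      dirty_card_queue_set().set_closure(&redirty);
    //      dirty_card_queue_set().apply_closure_to_all_completed_buffers();
    // 3. The buffers are handed to the mutator queue set, whose normal
    //    refinement machinery performs the actual remembered-set inserts:
    //      JavaThread::dirty_card_queue_set().merge_bufferlists(...);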