hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
changeset 25492 d27050bdfb04
parent 25491 70fb742e40aa
child 25500 4d2e06147d1e
@@ -55,10 +55,11 @@
 #include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
+#include "memory/allocation.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/iterator.hpp"
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
@@ -89,14 +90,14 @@
 // Notes on implementation of parallelism in different tasks.
 //
 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
 // The number of GC workers is passed to heap_region_par_iterate_chunked().
 // It does use run_task() which sets _n_workers in the task.
-// G1ParTask executes g1_process_strong_roots() ->
-// SharedHeap::process_strong_roots() which calls eventually to
+// G1ParTask executes g1_process_roots() ->
+// SharedHeap::process_roots() which eventually calls into
 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
-// SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
+// SequentialSubTasksDone.  SharedHeap::process_roots() also
 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
 //

 // Local to this file.

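The SubTasksDone and SequentialSubTasksDone utilities mentioned above share one idea: a fixed set of subtasks that any number of workers may race for, with each subtask claimed and executed exactly once. A minimal standalone sketch of that claiming protocol in plain C++11 (std::atomic stands in for HotSpot's Atomic class; all names here are invented for the example):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    const int kNumSubtasks = 8;
    std::atomic<bool> g_claimed[kNumSubtasks];   // zero-initialized: all unclaimed

    // Returns true for exactly one caller per subtask index.
    bool try_claim(int task) {
      return !g_claimed[task].exchange(true);
    }

    void worker(int id) {
      // Every worker offers to do every subtask; the exchange decides who wins.
      for (int t = 0; t < kNumSubtasks; t++) {
        if (try_claim(t)) {
          std::printf("worker %d runs subtask %d\n", id, t);
        }
      }
    }

    int main() {
      std::vector<std::thread> gang;
      for (int i = 0; i < 4; i++) gang.emplace_back(worker, i);
      for (std::thread& w : gang) w.join();
      return 0;
    }

This is why the number of workers can vary from pause to pause without changing the root-processing code: an absent worker simply claims nothing.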
@@ -3377,29 +3378,23 @@
            "Expected to be executed serially by the VM thread at this point");

     if (!silent) { gclog_or_tty->print("Roots "); }
     VerifyRootsClosure rootsCl(vo);
     VerifyKlassClosure klassCl(this, &rootsCl);
+    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);

     // We apply the relevant closures to all the oops in the
-    // system dictionary, class loader data graph and the string table.
-    // Don't verify the code cache here, since it's verified below.
-    const int so = SO_AllClasses | SO_Strings;
-
-    // Need cleared claim bits for the strong roots processing
-    ClassLoaderDataGraph::clear_claimed_marks();
-
-    process_strong_roots(true,      // activate StrongRootsScope
-                         ScanningOption(so),  // roots scanning options
-                         &rootsCl,
-                         &klassCl
-                         );
-
-    // Verify the nmethods in the code cache.
+    // system dictionary, class loader data graph, the string table
+    // and the nmethods in the code cache.
     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
-    CodeCache::blobs_do(&blobsCl);
+
+    process_all_roots(true,            // activate StrongRootsScope
+                      SO_AllCodeCache, // roots scanning options
+                      &rootsCl,
+                      &cldCl,
+                      &blobsCl);

     bool failures = rootsCl.failures() || codeRootsCl.failures();

     if (vo != VerifyOption_G1UseMarkWord) {
       // If we're verifying during a full GC then the region sets
@@ -3977,10 +3972,11 @@
       gc_prologue(false);
       increment_total_collections(false /* full gc */);
       increment_gc_time_stamp();

       verify_before_gc();
+
       check_bitmaps("GC Start");

       COMPILER2_PRESENT(DerivedPointerTable::clear());

       // Please see comment in g1CollectedHeap.hpp and
@@ -4327,15 +4323,11 @@
 void G1CollectedHeap::release_mutator_alloc_region() {
   _mutator_alloc_region.release();
   assert(_mutator_alloc_region.get() == NULL, "post-condition");
 }

-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-
-  _survivor_gc_alloc_region.init();
-  _old_gc_alloc_region.init();
+void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
   HeapRegion* retained_region = _retained_old_gc_alloc_region;
   _retained_old_gc_alloc_region = NULL;

   // We will discard the current GC alloc region if:
   // a) it's in the collection set (it can happen!),
@@ -4363,10 +4355,19 @@
     _hr_printer.reuse(retained_region);
     evacuation_info.set_alloc_regions_used_before(retained_region->used());
   }
 }

+void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  _survivor_gc_alloc_region.init();
+  _old_gc_alloc_region.init();
+
+  use_retained_old_gc_alloc_region(evacuation_info);
+}
+
 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
   evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
                                          _old_gc_alloc_region.count());
   _survivor_gc_alloc_region.release();
   // If we have an old GC alloc region to release, we'll save it in
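For context on this refactoring: the retained old region is the partially-filled old GC alloc region kept alive between evacuation pauses so its unused tail is not wasted. A toy sketch of the retain-and-reuse pattern (illustrative only; the Buffer type and all names are invented, and the real code also discards the retained region in the cases listed in the comment above):

    #include <cstddef>
    #include <cstdio>

    struct Buffer { size_t used; size_t capacity; };

    Buffer* g_retained = nullptr;   // survives from the previous cycle

    Buffer* acquire_buffer() {
      if (g_retained != nullptr) {
        Buffer* b = g_retained;     // reuse the leftover tail
        g_retained = nullptr;
        return b;
      }
      return new Buffer{0, 4096};   // otherwise start fresh
    }

    void release_buffer(Buffer* b) {
      if (b->used < b->capacity) {
        g_retained = b;             // keep the partially-filled buffer
      } else {
        delete b;                   // full: nothing left to reuse
      }
    }

    int main() {
      Buffer* b = acquire_buffer();
      b->used = 1000;
      release_buffer(b);
      std::printf("retained %zu of %zu bytes used\n", g_retained->used, g_retained->capacity);
      return 0;
    }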
@@ -4585,11 +4586,11 @@
   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
     _scanned_klass->record_modified_oops();
   }
 }

-template <G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, G1Mark do_mark_object>
 template <class T>
 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);

   if (oopDesc::is_null(heap_oop)) {
@@ -4607,35 +4608,34 @@
     } else {
       forwardee = _par_scan_state->copy_to_survivor_space(obj);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
-    if (do_mark_object && forwardee != obj) {
+    if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
       // mark it, the evacuation failure protocol will do so.
       mark_forwarded_object(obj, forwardee);
     }

     if (barrier == G1BarrierKlass) {
       do_klass_barrier(p, forwardee);
     }
   } else {
     // The object is not in collection set. If we're a root scanning
-    // closure during an initial mark pause (i.e. do_mark_object will
-    // be true) then attempt to mark the object.
-    if (do_mark_object) {
+    // closure during an initial mark pause then attempt to mark the object.
+    if (do_mark_object == G1MarkFromRoot) {
       mark_object(obj);
     }
   }

   if (barrier == G1BarrierEvac) {
     _par_scan_state->update_rs(_from, p, _worker_id);
   }
 }

-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
+template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
+template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);

 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
   G1CollectedHeap*              _g1h;
   G1ParScanThreadState*         _par_scan_state;
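Note what the hunk above buys: with G1Mark as a template parameter instead of a bool, the test `do_mark_object != G1MarkNone` is a compile-time constant in every instantiation, so the non-marking copy closure carries no marking branch at all, and a third mode (G1MarkPromotedFromRoot, used below for weak roots) fits without adding any runtime state. A standalone sketch of the pattern with invented names:

    #include <cstdio>

    enum Mark { MarkNone, MarkFromRoot, MarkPromoted };

    template <Mark do_mark>
    void process_oop(int obj) {
      if (do_mark != MarkNone) {                 // constant per instantiation;
        std::printf("copy and mark %d\n", obj);  // folded away for MarkNone
      } else {
        std::printf("copy %d\n", obj);
      }
    }

    int main() {
      process_oop<MarkNone>(1);      // instantiation without the marking path
      process_oop<MarkFromRoot>(2);  // instantiation with it
      return 0;
    }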
@@ -4744,10 +4744,55 @@
     _g1h->set_n_termination(active_workers);
     terminator()->reset_for_reuse(active_workers);
     _n_workers = active_workers;
   }

+  // Helps out with CLD processing.
+  //
+  // During InitialMark we need to:
+  // 1) Scavenge all CLDs for the young GC.
+  // 2) Mark all objects directly reachable from strong CLDs.
+  template <G1Mark do_mark_object>
+  class G1CLDClosure : public CLDClosure {
+    G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
+    G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
+    G1KlassScanClosure                                _klass_in_cld_closure;
+    bool                                              _claim;
+
+   public:
+    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
+                 bool only_young, bool claim)
+        : _oop_closure(oop_closure),
+          _oop_in_klass_closure(oop_closure->g1(),
+                                oop_closure->pss(),
+                                oop_closure->rp()),
+          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
+          _claim(claim) {
+
+    }
+
+    void do_cld(ClassLoaderData* cld) {
+      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
+    }
+  };
+
+  class G1CodeBlobClosure: public CodeBlobClosure {
+    OopClosure* _f;
+
+   public:
+    G1CodeBlobClosure(OopClosure* f) : _f(f) {}
+    void do_code_blob(CodeBlob* blob) {
+      nmethod* that = blob->as_nmethod_or_null();
+      if (that != NULL) {
+        if (!that->test_set_oops_do_mark()) {
+          that->oops_do(_f);
+          that->fix_oop_relocations();
+        }
+      }
+    }
+  };
+
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round

     double start_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
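G1CodeBlobClosure above leans on nmethod::test_set_oops_do_mark(): however many workers reach the same nmethod through different roots, only the first claims it, applies the oop closure and fixes relocations; the rest skip it. A minimal standalone sketch of that test-and-set claim (std::atomic in place of the VM internals; the real method additionally links marked nmethods onto a list so the marks can be reset after the pause):

    #include <atomic>
    #include <cstdio>

    struct Method {
      std::atomic<bool> oops_do_mark{false};
      // Returns the previous value: false means "we claimed it".
      bool test_set_oops_do_mark() { return oops_do_mark.exchange(true); }
    };

    void visit(Method& m, int worker) {
      if (!m.test_set_oops_do_mark()) {
        std::printf("worker %d processes the method's oops once\n", worker);
      }
    }

    int main() {
      Method m;
      visit(m, 0);   // does the work
      visit(m, 1);   // sees the mark, skips
      return 0;
    }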
@@ -4761,44 +4806,66 @@
       G1ParScanThreadState            pss(_g1h, worker_id, rp);
       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);

       pss.set_evac_failure_closure(&evac_failure_cl);

-      G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
-      G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
-
-      G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
-      G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
-
-      bool only_young                 = _g1h->g1_policy()->gcs_are_young();
-      G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
-      G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
-
-      OopClosure*                    scan_root_cl = &only_scan_root_cl;
-      G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
+      bool only_young = _g1h->g1_policy()->gcs_are_young();
+
+      // Non-IM young GC.
+      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
+                                                                               only_young, // Only process dirty klasses.
+                                                                               false);     // No need to claim CLDs.
+      // IM young GC.
+      //    Strong roots closures.
+      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
+                                                                               false, // Process all klasses.
+                                                                               true); // Need to claim CLDs.
+      //    Weak roots closures.
+      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
+                                                                                    false, // Process all klasses.
+                                                                                    true); // Need to claim CLDs.
+
+      G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
+      G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
+      // IM Weak code roots are handled later.
+
+      OopClosure* strong_root_cl;
+      OopClosure* weak_root_cl;
+      CLDClosure* strong_cld_cl;
+      CLDClosure* weak_cld_cl;
+      CodeBlobClosure* strong_code_cl;

       if (_g1h->g1_policy()->during_initial_mark_pause()) {
         // We also need to mark copied objects.
-        scan_root_cl = &scan_mark_root_cl;
-        scan_klasses_cl = &scan_mark_klasses_cl_s;
+        strong_root_cl = &scan_mark_root_cl;
+        weak_root_cl   = &scan_mark_weak_root_cl;
+        strong_cld_cl  = &scan_mark_cld_cl;
+        weak_cld_cl    = &scan_mark_weak_cld_cl;
+        strong_code_cl = &scan_mark_code_cl;
+      } else {
+        strong_root_cl = &scan_only_root_cl;
+        weak_root_cl   = &scan_only_root_cl;
+        strong_cld_cl  = &scan_only_cld_cl;
+        weak_cld_cl    = &scan_only_cld_cl;
+        strong_code_cl = &scan_only_code_cl;
       }

-      G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
-
-      // Don't scan the scavengable methods in the code cache as part
-      // of strong root scanning. The code roots that point into a
-      // region in the collection set are scanned when we scan the
-      // region's RSet.
-      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
+
+      G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);

       pss.start_strong_roots();
-      _g1h->g1_process_strong_roots(/* is scavenging */ true,
-                                    SharedHeap::ScanningOption(so),
-                                    scan_root_cl,
-                                    &push_heap_rs_cl,
-                                    scan_klasses_cl,
-                                    worker_id);
+      _g1h->g1_process_roots(strong_root_cl,
+                             weak_root_cl,
+                             &push_heap_rs_cl,
+                             strong_cld_cl,
+                             weak_cld_cl,
+                             strong_code_cl,
+                             worker_id);
+
       pss.end_strong_roots();

       {
         double start = os::elapsedTime();
         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
@@ -4832,42 +4899,60 @@

 // This method is run in a GC worker.

 void
 G1CollectedHeap::
-g1_process_strong_roots(bool is_scavenging,
-                        ScanningOption so,
-                        OopClosure* scan_non_heap_roots,
-                        OopsInHeapRegionClosure* scan_rs,
-                        G1KlassScanClosure* scan_klasses,
-                        uint worker_i) {
-
-  // First scan the strong roots
+g1_process_roots(OopClosure* scan_non_heap_roots,
+                 OopClosure* scan_non_heap_weak_roots,
+                 OopsInHeapRegionClosure* scan_rs,
+                 CLDClosure* scan_strong_clds,
+                 CLDClosure* scan_weak_clds,
+                 CodeBlobClosure* scan_strong_code,
+                 uint worker_i) {
+
+  // First scan the shared roots.
   double ext_roots_start = os::elapsedTime();
   double closure_app_time_sec = 0.0;

+  bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
-
-  process_strong_roots(false, // no scoping; this is parallel code
-                       so,
-                       &buf_scan_non_heap_roots,
-                       scan_klasses
-                       );
+  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
+
+  process_roots(false, // no scoping; this is parallel code
+                SharedHeap::SO_None,
+                &buf_scan_non_heap_roots,
+                &buf_scan_non_heap_weak_roots,
+                scan_strong_clds,
+                // Initial Mark handles the weak CLDs separately.
+                (during_im ? NULL : scan_weak_clds),
+                scan_strong_code);

   // Now the CM ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
     // We need to treat the discovered reference lists of the
     // concurrent mark ref processor as roots and keep entries
     // (which are added by the marking threads) on them live
     // until they can be processed at the end of marking.
     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
   }

+  if (during_im) {
+    // Barrier to make sure all workers passed
+    // the strong CLD and strong nmethods phases.
+    active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
+
+    // Now take the complement of the strong CLDs.
+    ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
+  }
+
   // Finish up any enqueued closure apps (attributed as object copy time).
   buf_scan_non_heap_roots.done();
-
-  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
+  buf_scan_non_heap_weak_roots.done();
+
+  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
+      + buf_scan_non_heap_weak_roots.closure_app_seconds();

   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);

   double ext_root_time_ms =
     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
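Both root closures are wrapped in BufferingOopClosure so that applying the copy closure can be timed separately from walking the roots: scanning merely enqueues oop locations, and done() applies the wrapped closure, which is what closure_app_seconds() feeds into the object-copy accounting above. A rough standalone sketch of the idea (illustrative; the real BufferingOopClosure also drains itself whenever its fixed-size buffer fills):

    #include <chrono>
    #include <cstdio>
    #include <vector>

    class BufferingClosure {
      std::vector<int*> _buf;
      double _app_seconds = 0.0;
      void apply(int* p) { *p += 1; }             // stand-in for the real closure
     public:
      void do_oop(int* p) { _buf.push_back(p); }  // cheap enqueue during the scan
      void done() {                               // timed application
        auto t0 = std::chrono::steady_clock::now();
        for (int* p : _buf) apply(p);
        _buf.clear();
        auto t1 = std::chrono::steady_clock::now();
        _app_seconds += std::chrono::duration<double>(t1 - t0).count();
      }
      double closure_app_seconds() const { return _app_seconds; }
    };

    int main() {
      int roots[3] = {1, 2, 3};
      BufferingClosure cl;
      for (int& r : roots) cl.do_oop(&r);   // "root scanning"
      cl.done();                            // "object copy"
      std::printf("app time %.9f s, roots[0]=%d\n", cl.closure_app_seconds(), roots[0]);
      return 0;
    }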
@@ -4887,26 +4972,14 @@
       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
     }
   }
   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);

-  // If this is an initial mark pause, and we're not scanning
-  // the entire code cache, we need to mark the oops in the
-  // strong code root lists for the regions that are not in
-  // the collection set.
-  // Note all threads participate in this set of root tasks.
-  double mark_strong_code_roots_ms = 0.0;
-  if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
-    double mark_strong_roots_start = os::elapsedTime();
-    mark_strong_code_roots(worker_i);
-    mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
-  }
-  g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
-
   // Now scan the complement of the collection set.
-  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
-  g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
+  MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
+
+  g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);

   _process_strong_tasks->all_tasks_completed();
 }

 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
@@ -4924,11 +4997,12 @@
   int _symbols_removed;

   bool _do_in_parallel;
 public:
   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
-    AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
+    AbstractGangTask("String/Symbol Unlinking"),
+    _is_alive(is_alive),
     _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {

     _initial_string_table_size = StringTable::the_table()->table_size();
@@ -4946,10 +5020,18 @@
               err_msg("claim value %d after unlink less than initial string table size %d",
                       StringTable::parallel_claimed_index(), _initial_string_table_size));
     guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
               err_msg("claim value %d after unlink less than initial symbol table size %d",
                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
+
+    if (G1TraceStringSymbolTableScrubbing) {
+      gclog_or_tty->print_cr("Cleaned string and symbol table, "
+                             "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
+                             "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
+                             strings_processed(), strings_removed(),
+                             symbols_processed(), symbols_removed());
+    }
   }

   void work(uint worker_id) {
     if (_do_in_parallel) {
       int strings_processed = 0;
@@ -4981,29 +5063,305 @@

   size_t symbols_processed() const { return (size_t)_symbols_processed; }
   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
 };

-void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
-                                                     bool process_strings, bool process_symbols) {
+class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
+private:
+  static Monitor* _lock;
+
+  BoolObjectClosure* const _is_alive;
+  const bool               _unloading_occurred;
+  const uint               _num_workers;
+
+  // Variables used to claim nmethods.
+  nmethod* _first_nmethod;
+  volatile nmethod* _claimed_nmethod;
+
+  // The list of nmethods that need to be processed by the second pass.
+  volatile nmethod* _postponed_list;
+  volatile uint     _num_entered_barrier;
+
+ public:
+  G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
+      _is_alive(is_alive),
+      _unloading_occurred(unloading_occurred),
+      _num_workers(num_workers),
+      _first_nmethod(NULL),
+      _claimed_nmethod(NULL),
+      _postponed_list(NULL),
+      _num_entered_barrier(0)
+  {
+    nmethod::increase_unloading_clock();
+    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
+    _claimed_nmethod = (volatile nmethod*)_first_nmethod;
+  }
+
+  ~G1CodeCacheUnloadingTask() {
+    CodeCache::verify_clean_inline_caches();
+
+    CodeCache::set_needs_cache_clean(false);
+    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
+
+    CodeCache::verify_icholder_relocations();
+  }
+
+ private:
+  void add_to_postponed_list(nmethod* nm) {
+      nmethod* old;
+      do {
+        old = (nmethod*)_postponed_list;
+        nm->set_unloading_next(old);
+      } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
+  }
+
+  void clean_nmethod(nmethod* nm) {
+    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
+
+    if (postponed) {
+      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
+      add_to_postponed_list(nm);
+    }
+
+    // Mark that this nmethod has been cleaned/unloaded.
+    // After this call, it will be safe to ask if this nmethod was unloaded or not.
+    nm->set_unloading_clock(nmethod::global_unloading_clock());
+  }
+
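The unloading clock used just above is a global epoch counter: the task constructor bumps it once per cleaning round, and clean_nmethod() stamps each nmethod it finishes with the current value, turning "has this nmethod been cleaned this round?" into a plain equality check, with no per-round flag resetting. A single-threaded sketch of the idea with invented names:

    #include <cstdio>

    unsigned g_unloading_clock = 1;   // global epoch

    struct Method {
      unsigned unloading_clock = 0;   // epoch of the last round that cleaned us
      bool cleaned_this_round() const { return unloading_clock == g_unloading_clock; }
    };

    void clean(Method& m) {
      // ... cleaning work would go here ...
      m.unloading_clock = g_unloading_clock;   // stamp when done
    }

    int main() {
      Method m;
      g_unloading_clock++;   // a new round begins; every old stamp goes stale at once
      std::printf("before: %d\n", (int)m.cleaned_this_round());
      clean(m);
      std::printf("after:  %d\n", (int)m.cleaned_this_round());
      return 0;
    }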
       
+  void clean_nmethod_postponed(nmethod* nm) {
+    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
+  }
+
+  static const int MaxClaimNmethods = 16;
+
+  void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
+    nmethod* first;
+    nmethod* last;
+
+    do {
+      *num_claimed_nmethods = 0;
+
+      first = last = (nmethod*)_claimed_nmethod;
+
+      if (first != NULL) {
+        for (int i = 0; i < MaxClaimNmethods; i++) {
+          last = CodeCache::alive_nmethod(CodeCache::next(last));
+
+          if (last == NULL) {
+            break;
+          }
+
+          claimed_nmethods[i] = last;
+          (*num_claimed_nmethods)++;
+        }
+      }
+
+    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
+  }
+
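claim_nmethods() claims up to MaxClaimNmethods nmethods per compare-and-swap rather than one, cutting contention on the shared cursor: each worker reads the cursor, walks forward privately, and only then tries to publish the advanced position. A standalone sketch over a plain singly-linked list (std::atomic in place of Atomic::cmpxchg_ptr; ABA does not arise here because nodes are never recycled while claiming runs):

    #include <atomic>
    #include <cstdio>

    struct Node { int payload; Node* next; };

    std::atomic<Node*> g_cursor{nullptr};
    const int kBatch = 16;

    // On return, claimed[0..n-1] belong exclusively to the caller.
    int claim_batch(Node* claimed[]) {
      Node* first;
      Node* last;
      int n;
      do {
        n = 0;
        first = last = g_cursor.load();
        while (last != nullptr && n < kBatch) {   // private walk, no writes yet
          claimed[n++] = last;
          last = last->next;
        }
      } while (n > 0 && !g_cursor.compare_exchange_weak(first, last));
      return n;   // one successful CAS published the whole batch
    }

    int main() {
      Node c = {3, nullptr}, b = {2, &c}, a = {1, &b};
      g_cursor.store(&a);
      Node* mine[kBatch];
      int n = claim_batch(mine);
      std::printf("claimed %d nodes, first payload %d\n", n, mine[0]->payload);
      return 0;
    }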
       
+  nmethod* claim_postponed_nmethod() {
+    nmethod* claim;
+    nmethod* next;
+
+    do {
+      claim = (nmethod*)_postponed_list;
+      if (claim == NULL) {
+        return NULL;
+      }
+
+      next = claim->unloading_next();
+
+    } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
+
+    return claim;
+  }
+
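add_to_postponed_list() and claim_postponed_nmethod() together form a lock-free LIFO list (a Treiber stack): both push and pop retry a single compare-and-swap on the list head. A compact sketch in portable C++11 (std::atomic standing in for the Atomic::cmpxchg_ptr loops above; safe against ABA here because nodes are not recycled within a round):

    #include <atomic>
    #include <cstdio>

    struct Node { int payload; Node* next; };

    std::atomic<Node*> g_postponed{nullptr};

    void push(Node* n) {
      Node* old = g_postponed.load();
      do {
        n->next = old;                  // link to the current head
      } while (!g_postponed.compare_exchange_weak(old, n));
    }

    Node* pop() {
      Node* head = g_postponed.load();
      while (head != nullptr &&
             !g_postponed.compare_exchange_weak(head, head->next)) {
        // head is refreshed by the failed compare-exchange; retry
      }
      return head;                      // nullptr when the list is drained
    }

    int main() {
      Node a = {1, nullptr}, b = {2, nullptr};
      push(&a);
      push(&b);
      for (Node* n = pop(); n != nullptr; n = pop()) {
        std::printf("popped %d\n", n->payload);
      }
      return 0;
    }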
       
+ public:
+  // Mark that we're done with the first pass of nmethod cleaning.
+  void barrier_mark(uint worker_id) {
+    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    _num_entered_barrier++;
+    if (_num_entered_barrier == _num_workers) {
+      ml.notify_all();
+    }
+  }
+
+  // See if we have to wait for the other workers to
+  // finish their first-pass nmethod cleaning work.
+  void barrier_wait(uint worker_id) {
+    if (_num_entered_barrier < _num_workers) {
+      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+      while (_num_entered_barrier < _num_workers) {
+          ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
+      }
+    }
+  }
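barrier_mark() and barrier_wait() implement a one-shot rendezvous: every worker announces the end of its first cleaning pass, and barrier_wait() blocks until the count reaches _num_workers. The unlocked pre-check in barrier_wait() is only a fast path; the count is re-checked under the lock before sleeping. A sketch of the same protocol using std::mutex and std::condition_variable in place of HotSpot's Monitor:

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    class OneShotBarrier {
      std::mutex _lock;
      std::condition_variable _cv;
      const unsigned _num_workers;
      unsigned _num_entered = 0;
     public:
      explicit OneShotBarrier(unsigned n) : _num_workers(n) {}
      void mark() {                       // "my first pass is done"
        std::lock_guard<std::mutex> g(_lock);
        if (++_num_entered == _num_workers) _cv.notify_all();
      }
      void wait() {                       // block until everyone has marked
        std::unique_lock<std::mutex> g(_lock);
        _cv.wait(g, [this] { return _num_entered == _num_workers; });
      }
    };

    int main() {
      OneShotBarrier barrier(4);
      std::vector<std::thread> gang;
      for (int i = 0; i < 4; i++) {
        gang.emplace_back([&barrier, i] {
          barrier.mark();   // unrelated work can be done between mark and wait
          barrier.wait();
          std::printf("worker %d enters the second pass\n", i);
        });
      }
      for (std::thread& w : gang) w.join();
      return 0;
    }

The gap between mark() and wait() is exactly where G1ParallelCleaningTask::work() below slots the string and symbol table cleaning, so workers stay busy instead of idling at the barrier.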
       
+
+  // Cleaning and unloading of nmethods. Some work has to be postponed
+  // to the second pass, when we know which nmethods survive.
+  void work_first_pass(uint worker_id) {
+    // The first nmethod is claimed by the first worker.
+    if (worker_id == 0 && _first_nmethod != NULL) {
+      clean_nmethod(_first_nmethod);
+      _first_nmethod = NULL;
+    }
+
+    int num_claimed_nmethods;
+    nmethod* claimed_nmethods[MaxClaimNmethods];
+
+    while (true) {
+      claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
+
+      if (num_claimed_nmethods == 0) {
+        break;
+      }
+
+      for (int i = 0; i < num_claimed_nmethods; i++) {
+        clean_nmethod(claimed_nmethods[i]);
+      }
+    }
+  }
+
+  void work_second_pass(uint worker_id) {
+    nmethod* nm;
+    // Take care of postponed nmethods.
+    while ((nm = claim_postponed_nmethod()) != NULL) {
+      clean_nmethod_postponed(nm);
+    }
+  }
+};
+
+Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
+
+class G1KlassCleaningTask : public StackObj {
+  BoolObjectClosure*                      _is_alive;
+  volatile jint                           _clean_klass_tree_claimed;
+  ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
+
+ public:
+  G1KlassCleaningTask(BoolObjectClosure* is_alive) :
+      _is_alive(is_alive),
+      _clean_klass_tree_claimed(0),
+      _klass_iterator() {
+  }
+
+ private:
+  bool claim_clean_klass_tree_task() {
+    if (_clean_klass_tree_claimed) {
+      return false;
+    }
+
+    return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
+  }
+
+  InstanceKlass* claim_next_klass() {
+    Klass* klass;
+    do {
+      klass = _klass_iterator.next_klass();
+    } while (klass != NULL && !klass->oop_is_instance());
+
+    return (InstanceKlass*)klass;
+  }
+
+public:
+
+  void clean_klass(InstanceKlass* ik) {
+    ik->clean_implementors_list(_is_alive);
+    ik->clean_method_data(_is_alive);
+
+    // G1 specific cleanup work that has
+    // been moved here to be done in parallel.
+    ik->clean_dependent_nmethods();
+  }
+
+  void work() {
+    ResourceMark rm;
+
+    // One worker will clean the subklass/sibling klass tree.
+    if (claim_clean_klass_tree_task()) {
+      Klass::clean_subklass_tree(_is_alive);
+    }
+
+    // All workers will help clean the classes.
+    InstanceKlass* klass;
+    while ((klass = claim_next_klass()) != NULL) {
+      clean_klass(klass);
+    }
+  }
+};
+
+// To minimize the remark pause times, the tasks below are done in parallel.
+class G1ParallelCleaningTask : public AbstractGangTask {
+private:
+  G1StringSymbolTableUnlinkTask _string_symbol_task;
+  G1CodeCacheUnloadingTask      _code_cache_task;
+  G1KlassCleaningTask           _klass_cleaning_task;
+
+public:
+  // The constructor is run in the VMThread.
+  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
+      AbstractGangTask("Parallel Cleaning"),
+      _string_symbol_task(is_alive, process_strings, process_symbols),
+      _code_cache_task(num_workers, is_alive, unloading_occurred),
+      _klass_cleaning_task(is_alive) {
+  }
+
+  // The parallel work done by all worker threads.
+  void work(uint worker_id) {
+    // Do first pass of code cache cleaning.
+    _code_cache_task.work_first_pass(worker_id);
+
+    // Let the threads mark that the first pass is done.
+    _code_cache_task.barrier_mark(worker_id);
+
+    // Clean the Strings and Symbols.
+    _string_symbol_task.work(worker_id);
+
+    // Wait for all workers to finish the first code cache cleaning pass.
+    _code_cache_task.barrier_wait(worker_id);
+
+    // Do the second code cache cleaning work, which relies on
+    // the liveness information gathered during the first pass.
+    _code_cache_task.work_second_pass(worker_id);
+
+    // Clean all klasses that were not unloaded.
+    _klass_cleaning_task.work();
+  }
+};
+
+
+void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
+                                        bool process_strings,
+                                        bool process_symbols,
+                                        bool class_unloading_occurred) {
   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                   _g1h->workers()->active_workers() : 1);
+                    workers()->active_workers() : 1);

-  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+  G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
+                                        n_workers, class_unloading_occurred);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     set_par_threads(n_workers);
     workers()->run_task(&g1_unlink_task);
     set_par_threads(0);
   } else {
     g1_unlink_task.work(0);
   }
-  if (G1TraceStringSymbolTableScrubbing) {
-    gclog_or_tty->print_cr("Cleaned string and symbol table, "
-                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
-                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
-                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
-                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
+}
+
+void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
+                                                     bool process_strings, bool process_symbols) {
+  {
+    uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                     _g1h->workers()->active_workers() : 1);
+    G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      set_par_threads(n_workers);
+      workers()->run_task(&g1_unlink_task);
+      set_par_threads(0);
+    } else {
+      g1_unlink_task.work(0);
+    }
   }

   if (G1StringDedup::is_enabled()) {
     G1StringDedup::unlink(is_alive);
   }
@@ -5592,10 +5950,14 @@
   double start_par_time_sec = os::elapsedTime();
   double end_par_time_sec;

   {
     StrongRootsScope srs(this);
+    // InitialMark needs claim bits to keep track of the marked-through CLDs.
+    if (g1_policy()->during_initial_mark_pause()) {
+      ClassLoaderDataGraph::clear_claimed_marks();
+    }

     if (G1CollectedHeap::use_parallel_gc_threads()) {
       // The individual threads will set their evac-failure closures.
       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
       // These tasks use SharedHeap::_process_strong_tasks
@@ -6627,110 +6989,10 @@
   G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
 }

-// Mark all the code roots that point into regions *not* in the
-// collection set.
-//
-// Note we do not want to use a "marking" CodeBlobToOopClosure while
-// walking the code roots lists of regions not in the collection
-// set. Suppose we have an nmethod (M) that points to objects in two
-// separate regions - one in the collection set (R1) and one not (R2).
-// Using a "marking" CodeBlobToOopClosure here would result in "marking"
-// nmethod M when walking the code roots for R1. When we come to scan
-// the code roots for R2, we would see that M is already marked and it
-// would be skipped and the objects in R2 that are referenced from M
-// would not be evacuated.
-
-class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
-
-  class MarkStrongCodeRootOopClosure: public OopClosure {
-    ConcurrentMark* _cm;
-    HeapRegion* _hr;
-    uint _worker_id;
-
-    template <class T> void do_oop_work(T* p) {
-      T heap_oop = oopDesc::load_heap_oop(p);
-      if (!oopDesc::is_null(heap_oop)) {
-        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-        // Only mark objects in the region (which is assumed
-        // to be not in the collection set).
-        if (_hr->is_in(obj)) {
-          _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
-        }
-      }
-    }
-
-  public:
-    MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
-      _cm(cm), _hr(hr), _worker_id(worker_id) {
-      assert(!_hr->in_collection_set(), "sanity");
-    }
-
-    void do_oop(narrowOop* p) { do_oop_work(p); }
-    void do_oop(oop* p)       { do_oop_work(p); }
-  };
-
-  MarkStrongCodeRootOopClosure _oop_cl;
-
-public:
-  MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
-    _oop_cl(cm, hr, worker_id) {}
-
-  void do_code_blob(CodeBlob* cb) {
-    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
-    if (nm != NULL) {
-      nm->oops_do(&_oop_cl);
-    }
-  }
-};
-
-class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  uint _worker_id;
-
-public:
-  MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
-    _g1h(g1h), _worker_id(worker_id) {}
-
-  bool doHeapRegion(HeapRegion *hr) {
-    HeapRegionRemSet* hrrs = hr->rem_set();
-    if (hr->continuesHumongous()) {
-      // Code roots should never be attached to a continuation of a humongous region
-      assert(hrrs->strong_code_roots_list_length() == 0,
-             err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
-                     " starting at "HR_FORMAT", but has "SIZE_FORMAT,
-                     HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
-                     hrrs->strong_code_roots_list_length()));
-      return false;
-    }
-
-    if (hr->in_collection_set()) {
-      // Don't mark code roots into regions in the collection set here.
-      // They will be marked when we scan them.
-      return false;
-    }
-
-    MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
-    hr->strong_code_roots_do(&cb_cl);
-    return false;
-  }
-};
-
-void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
-  MarkStrongCodeRootsHRClosure cl(this, worker_id);
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    heap_region_par_iterate_chunked(&cl,
-                                    worker_id,
-                                    workers()->active_workers(),
-                                    HeapRegion::ParMarkRootClaimValue);
-  } else {
-    heap_region_iterate(&cl);
-  }
-}
-
 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
   G1CollectedHeap* _g1h;

 public:
   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :