hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
changeset 29698 9be8d1b0dfdc
parent 29693 fac175f7a466
parent 29584 5b3cb9f0e39d
child 29792 8c6fa07f0869
comparison: 29697:92501504191b vs 29698:9be8d1b0dfdc
4741  4741
4742  4742        for (int i = 0; i < num_claimed_nmethods; i++) {
4743  4743          clean_nmethod(claimed_nmethods[i]);
4744  4744        }
4745  4745      }
4746
4747            // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
4748            // Need to retire the buffers now that this thread has stopped cleaning nmethods.
4749            MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
4750  4746    }
4751  4747
4752  4748    void work_second_pass(uint worker_id) {
4753  4749      nmethod* nm;
4754  4750      // Take care of postponed nmethods.
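The added lines above close out the first code-cache pass: a worker cleans the nmethods it has claimed, and then retires its thread-local MetadataOnStackMark buffer, because nmethod cleaning contributes entries to that buffer and nobody else will flush it once the thread stops cleaning (the claiming itself happens above the excerpt). A minimal standalone sketch of the same pattern, claim a batch from a shared cursor, do the work, then retire the per-thread buffer; all names (kBatchSize, g_cursor, g_retired) are invented for illustration and nothing here is HotSpot code:

  // Build: g++ -std=c++11 -pthread claim_and_retire.cpp   (hypothetical demo)
  #include <algorithm>
  #include <atomic>
  #include <cstdio>
  #include <mutex>
  #include <thread>
  #include <vector>

  static const size_t kBatchSize = 4;          // how many items one claim grabs
  static std::vector<int> g_items;             // stand-in for the list of nmethods
  static std::atomic<size_t> g_cursor(0);      // shared claim cursor
  static std::vector<int> g_retired;           // stand-in for the global buffer list
  static std::mutex g_retired_lock;

  // A worker keeps a thread-local scratch buffer while it works and hands it
  // back ("retires" it) once it has stopped claiming batches.
  static void worker() {
    std::vector<int> local_buffer;
    for (;;) {
      size_t first = g_cursor.fetch_add(kBatchSize);     // claim a batch atomically
      if (first >= g_items.size()) break;                // nothing left to claim
      size_t last = std::min(first + kBatchSize, g_items.size());
      for (size_t i = first; i < last; i++) {
        local_buffer.push_back(g_items[i]);              // "clean" the claimed item
      }
    }
    // Retire the buffer now that this thread has stopped cleaning.
    std::lock_guard<std::mutex> guard(g_retired_lock);
    g_retired.insert(g_retired.end(), local_buffer.begin(), local_buffer.end());
  }

  int main() {
    for (int i = 0; i < 100; i++) g_items.push_back(i);
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; t++) threads.emplace_back(worker);
    for (std::thread& t : threads) t.join();
    std::printf("retired %zu entries\n", g_retired.size()); // expect 100
    return 0;
  }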
4797  4793      ik->clean_method_data(_is_alive);
4798  4794
4799  4795      // G1 specific cleanup work that has
4800  4796      // been moved here to be done in parallel.
4801  4797      ik->clean_dependent_nmethods();
4802            if (JvmtiExport::has_redefined_a_class()) {
4803              InstanceKlass::purge_previous_versions(ik);
4804            }
4805  4798    }
4806  4799
4807  4800    void work() {
4808  4801      ResourceMark rm;
4809  4802
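The per-class cleanup routine ending above bundles work that was moved into the parallel task, and the new lines make the purge of previous class versions conditional on JvmtiExport::has_redefined_a_class(), so that walk is skipped entirely unless some class has actually been redefined. A rough sketch of that guarded per-item cleanup, using invented stand-ins (Klass, g_any_class_redefined) rather than the real InstanceKlass and JVMTI query:

  // Hypothetical stand-ins; not the real InstanceKlass or JVMTI API.
  #include <cstdio>
  #include <vector>

  struct Klass {
    const char* name;
    bool has_stale_method_data;
    bool has_previous_versions;
  };

  static bool g_any_class_redefined = false;   // stand-in for JvmtiExport::has_redefined_a_class()

  static void clean_one_klass(Klass& k) {
    k.has_stale_method_data = false;           // cheap cleanup, done unconditionally
    // Pay for the previous-version purge only if a redefinition happened anywhere;
    // otherwise no class can have previous versions worth purging.
    if (g_any_class_redefined && k.has_previous_versions) {
      k.has_previous_versions = false;
      std::printf("purged previous versions of %s\n", k.name);
    }
  }

  int main() {
    std::vector<Klass> klasses = {{"A", true, false}, {"B", false, true}};
    g_any_class_redefined = true;              // pretend some class was redefined
    for (Klass& k : klasses) clean_one_klass(k);
    return 0;
  }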
4834  4827        _string_symbol_task(is_alive, process_strings, process_symbols),
4835  4828        _code_cache_task(num_workers, is_alive, unloading_occurred),
4836  4829        _klass_cleaning_task(is_alive) {
4837  4830    }
4838  4831
4839          void pre_work_verification() {
4840            assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
4841          }
4842
4843          void post_work_verification() {
4844            assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
4845          }
4846
4847  4832    // The parallel work done by all worker threads.
4848  4833    void work(uint worker_id) {
4849            pre_work_verification();
4850
4851  4834      // Do first pass of code cache cleaning.
4852  4835      _code_cache_task.work_first_pass(worker_id);
4853  4836
4854  4837      // Let the threads mark that the first pass is done.
4855  4838      _code_cache_task.barrier_mark(worker_id);
4864  4847      // the liveness information gathered during the first pass.
4865  4848      _code_cache_task.work_second_pass(worker_id);
4866  4849
4867  4850      // Clean all klasses that were not unloaded.
4868  4851      _klass_cleaning_task.work();
4869
4870            post_work_verification();
4871  4852    }
4872  4853  };
4873  4854
4874  4855
4875  4856  void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
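The work(uint worker_id) body spanning the two hunks above fixes the phase order for every worker: verify there is no leftover per-thread buffer, run the first code-cache pass, mark the barrier (the wait sits in the elided lines), run a second pass that relies on liveness gathered during the first, clean the remaining klasses, and verify again. Below is a small self-contained sketch of that two-phase shape with an explicit barrier between the passes; the Barrier class and the phase bodies are invented for illustration, and the mark and wait steps are folded into one call rather than kept separate as in the task above:

  // Build: g++ -std=c++11 -pthread two_phase_workers.cpp   (hypothetical demo)
  #include <condition_variable>
  #include <cstdio>
  #include <mutex>
  #include <thread>
  #include <vector>

  // A counting barrier: each worker "marks" that its first pass is done and
  // waits until every worker has done so before continuing.
  class Barrier {
   public:
    explicit Barrier(unsigned n) : _expected(n), _marked(0) {}
    void mark_and_wait() {
      std::unique_lock<std::mutex> lock(_lock);
      if (++_marked == _expected) {
        _cv.notify_all();                      // last worker releases the others
      } else {
        _cv.wait(lock, [this] { return _marked == _expected; });
      }
    }
   private:
    std::mutex _lock;
    std::condition_variable _cv;
    unsigned _expected;
    unsigned _marked;
  };

  static void first_pass(unsigned id)  { std::printf("worker %u: first pass\n", id); }
  static void second_pass(unsigned id) { std::printf("worker %u: second pass\n", id); }

  static void work(unsigned worker_id, Barrier* barrier) {
    first_pass(worker_id);            // gather per-worker results
    barrier->mark_and_wait();         // no worker starts the second pass early
    second_pass(worker_id);           // may rely on every worker's first-pass results
  }

  int main() {
    const unsigned num_workers = 4;
    Barrier barrier(num_workers);
    std::vector<std::thread> workers;
    for (unsigned i = 0; i < num_workers; i++) workers.emplace_back(work, i, &barrier);
    for (std::thread& t : workers) t.join();
    return 0;
  }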
5256  5237      uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5257  5238
5258  5239      // limit is set using max_num_q() - which was set using ParallelGCThreads.
5259  5240      // So this must be true - but assert just in case someone decides to
5260  5241      // change the worker ids.
5261            assert(0 <= worker_id && worker_id < limit, "sanity");
      5242      assert(worker_id < limit, "sanity");
5262  5243      assert(!rp->discovery_is_atomic(), "check this code");
5263  5244
5264  5245      // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5265  5246      for (uint idx = worker_id; idx < limit; idx += stride) {
5266  5247        DiscoveredList& ref_list = rp->discovered_refs()[idx];
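In this last hunk each worker walks the discovered reference lists with a stride: worker_id, worker_id + stride, and so on below limit. With the stride set to the worker count (capped at limit), the workers cover indices 0 .. limit-1 disjointly, which is what the sanity asserts protect. A quick standalone check of that strided split, assuming the worker count does not exceed the number of lists; the names below are stand-ins, not the HotSpot variables:

  #include <algorithm>
  #include <cassert>
  #include <cstdio>
  #include <vector>

  int main() {
    const unsigned num_workers = 4;                    // stand-in for _n_workers
    const unsigned limit = 10;                         // stand-in for max_num_q()
    const unsigned stride = std::min(std::max(num_workers, 1u), limit);

    std::vector<unsigned> owner(limit, 0);
    std::vector<unsigned> hits(limit, 0);

    // Each worker visits indices worker_id, worker_id + stride, ... below limit.
    for (unsigned worker_id = 0; worker_id < stride; worker_id++) {
      for (unsigned idx = worker_id; idx < limit; idx += stride) {
        owner[idx] = worker_id;
        hits[idx]++;
      }
    }

    // Every list index is claimed by exactly one worker.
    for (unsigned idx = 0; idx < limit; idx++) {
      assert(hits[idx] == 1);
      std::printf("list %u -> worker %u\n", idx, owner[idx]);
    }
    return 0;
  }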