hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp
changeset 33107 77bf0d2069a3
parent 33105 294e48b4f704
child 33227 b00ec45f8c2c
child 33198 b37ad9fbf681
comparing 33106:20c533b9e167 with 33107:77bf0d2069a3
    38 #include "gc/parallel/psPromotionManager.inline.hpp"
    38 #include "gc/parallel/psPromotionManager.inline.hpp"
    39 #include "gc/parallel/psScavenge.hpp"
    39 #include "gc/parallel/psScavenge.hpp"
    40 #include "gc/parallel/psYoungGen.hpp"
    40 #include "gc/parallel/psYoungGen.hpp"
    41 #include "gc/shared/gcCause.hpp"
    41 #include "gc/shared/gcCause.hpp"
    42 #include "gc/shared/gcHeapSummary.hpp"
    42 #include "gc/shared/gcHeapSummary.hpp"
       
    43 #include "gc/shared/gcId.hpp"
    43 #include "gc/shared/gcLocker.inline.hpp"
    44 #include "gc/shared/gcLocker.inline.hpp"
    44 #include "gc/shared/gcTimer.hpp"
    45 #include "gc/shared/gcTimer.hpp"
    45 #include "gc/shared/gcTrace.hpp"
    46 #include "gc/shared/gcTrace.hpp"
    46 #include "gc/shared/gcTraceTime.hpp"
    47 #include "gc/shared/gcTraceTime.hpp"
    47 #include "gc/shared/isGCActiveMark.hpp"
    48 #include "gc/shared/isGCActiveMark.hpp"
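This hunk adds gc/shared/gcId.hpp to the include list; every other hunk in this diff then drops the trailing _gc_tracer.gc_id() argument from GCTraceTime constructions. Below is a minimal sketch of that call-site change, assuming stand-in types rather than the real HotSpot classes: the id becomes ambient state the timer reads, instead of a parameter the caller threads through.

    // Illustrative sketch only; OldStyleTraceTime/NewStyleTraceTime are made-up
    // stand-ins for GCTraceTime before and after this changeset.
    #include <cstdio>

    static unsigned g_current_gc_id = 7;   // stands in for the ambient GC id

    // Old shape: the caller passed the id explicitly (the pattern being removed).
    struct OldStyleTraceTime {
      OldStyleTraceTime(const char* title, unsigned gc_id) {
        std::printf("GC(%u) %s\n", gc_id, title);
      }
    };

    // New shape: same output, no id parameter at the call site.
    struct NewStyleTraceTime {
      explicit NewStyleTraceTime(const char* title) {
        std::printf("GC(%u) %s\n", g_current_gc_id, title);
      }
    };

    int main() {
      OldStyleTraceTime before("pre compact", g_current_gc_id); // pre-changeset style
      NewStyleTraceTime after("pre compact");                   // post-changeset style
    }
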
   958 {
   959 {
   959   // Update the from & to space pointers in space_info, since they are swapped
   960   // Update the from & to space pointers in space_info, since they are swapped
   960   // at each young gen gc.  Do the update unconditionally (even though a
   961   // at each young gen gc.  Do the update unconditionally (even though a
   961   // promotion failure does not swap spaces) because an unknown number of young
   962   // promotion failure does not swap spaces) because an unknown number of young
   962   // collections will have swapped the spaces an unknown number of times.
   963   // collections will have swapped the spaces an unknown number of times.
   963   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
   964   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
   964   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   965   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   965   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   966   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   966   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
   967   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
   967 
   968 
   968   pre_gc_values->fill(heap);
   969   pre_gc_values->fill(heap);
  1001   gc_task_manager()->release_all_resources();
  1002   gc_task_manager()->release_all_resources();
  1002 }
  1003 }
  1003 
  1004 
  1004 void PSParallelCompact::post_compact()
  1005 void PSParallelCompact::post_compact()
  1005 {
  1006 {
  1006   GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  1007   GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
  1007 
  1008 
  1008   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
  1009   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
  1009     // Clear the marking bitmap, summary data and split info.
  1010     // Clear the marking bitmap, summary data and split info.
  1010     clear_data_covering_space(SpaceId(id));
  1011     clear_data_covering_space(SpaceId(id));
  1011     // Update top().  Must be done after clearing the bitmap and summary data.
  1012     // Update top().  Must be done after clearing the bitmap and summary data.
  1822 #endif  // #ifndef PRODUCT
  1823 #endif  // #ifndef PRODUCT
  1823 
  1824 
  1824 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
  1825 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
  1825                                       bool maximum_compaction)
  1826                                       bool maximum_compaction)
  1826 {
  1827 {
  1827   GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  1828   GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
  1828   // trace("2");
  1829   // trace("2");
  1829 
  1830 
  1830 #ifdef  ASSERT
  1831 #ifdef  ASSERT
  1831   if (TraceParallelOldGCMarkingPhase) {
  1832   if (TraceParallelOldGCMarkingPhase) {
  1832     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
  1833     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
  1982     return false;
  1983     return false;
  1983   }
  1984   }
  1984 
  1985 
  1985   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  1986   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  1986 
  1987 
       
  1988   GCIdMark gc_id_mark;
  1987   _gc_timer.register_gc_start();
  1989   _gc_timer.register_gc_start();
  1988   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
  1990   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
  1989 
  1991 
  1990   TimeStamp marking_start;
  1992   TimeStamp marking_start;
  1991   TimeStamp compaction_start;
  1993   TimeStamp compaction_start;
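The new GCIdMark gc_id_mark; is placed before _gc_timer.register_gc_start() and _gc_tracer.report_gc_start(), so the collection's id exists before anything is timed or reported. A small self-contained sketch of that ordering, with made-up names standing in for the real GCId/GCIdMark types:

    #include <cstdio>

    static unsigned g_current_gc_id = 0;   // ambient id read by phase logging
    static unsigned g_next_gc_id    = 0;

    // RAII marker, analogous in spirit to the GCIdMark added in this hunk:
    // constructing it assigns the id for the enclosing collection.
    struct GCIdMarkSketch {
      GCIdMarkSketch() { g_current_gc_id = ++g_next_gc_id; }
    };

    static void register_gc_start()     { std::printf("GC(%u) start\n", g_current_gc_id); }
    static void phase(const char* name) { std::printf("GC(%u) %s\n", g_current_gc_id, name); }

    int main() {
      GCIdMarkSketch gc_id_mark;           // 1. pick the id for this collection
      register_gc_start();                 // 2. then register/report the GC start
      phase("marking phase");              // 3. later phases reuse the same id
      phase("summary phase");
      phase("compaction phase");
    }
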
  2029     // Set the number of GC threads to be used in this collection
  2031     // Set the number of GC threads to be used in this collection
  2030     gc_task_manager()->set_active_gang();
  2032     gc_task_manager()->set_active_gang();
  2031     gc_task_manager()->task_idle_workers();
  2033     gc_task_manager()->task_idle_workers();
  2032 
  2034 
  2033     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  2035     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  2034     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
  2036     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
  2035     TraceCollectorStats tcs(counters());
  2037     TraceCollectorStats tcs(counters());
  2036     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
  2038     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
  2037 
  2039 
  2038     if (TraceOldGenTime) accumulated_time()->start();
  2040     if (TraceOldGenTime) accumulated_time()->start();
  2039 
  2041 
  2329 
  2331 
  2330 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
  2332 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
  2331                                       bool maximum_heap_compaction,
  2333                                       bool maximum_heap_compaction,
  2332                                       ParallelOldTracer *gc_tracer) {
  2334                                       ParallelOldTracer *gc_tracer) {
  2333   // Recursively traverse all live objects and mark them
  2335   // Recursively traverse all live objects and mark them
  2334   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2336   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
  2335 
  2337 
  2336   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  2338   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  2337   uint parallel_gc_threads = heap->gc_task_manager()->workers();
  2339   uint parallel_gc_threads = heap->gc_task_manager()->workers();
  2338   uint active_gc_threads = heap->gc_task_manager()->active_workers();
  2340   uint active_gc_threads = heap->gc_task_manager()->active_workers();
  2339   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  2341   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  2344 
  2346 
  2345   // Need new claim bits before marking starts.
  2347   // Need new claim bits before marking starts.
  2346   ClassLoaderDataGraph::clear_claimed_marks();
  2348   ClassLoaderDataGraph::clear_claimed_marks();
  2347 
  2349 
  2348   {
  2350   {
  2349     GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2351     GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
  2350 
  2352 
  2351     ParallelScavengeHeap::ParStrongRootsScope psrs;
  2353     ParallelScavengeHeap::ParStrongRootsScope psrs;
  2352 
  2354 
  2353     GCTaskQueue* q = GCTaskQueue::create();
  2355     GCTaskQueue* q = GCTaskQueue::create();
  2354 
  2356 
  2373     gc_task_manager()->execute_and_wait(q);
  2375     gc_task_manager()->execute_and_wait(q);
  2374   }
  2376   }
  2375 
  2377 
  2376   // Process reference objects found during marking
  2378   // Process reference objects found during marking
  2377   {
  2379   {
  2378     GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2380     GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
  2379 
  2381 
  2380     ReferenceProcessorStats stats;
  2382     ReferenceProcessorStats stats;
  2381     if (ref_processor()->processing_is_mt()) {
  2383     if (ref_processor()->processing_is_mt()) {
  2382       RefProcTaskExecutor task_executor;
  2384       RefProcTaskExecutor task_executor;
  2383       stats = ref_processor()->process_discovered_references(
  2385       stats = ref_processor()->process_discovered_references(
  2384         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
  2386         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
  2385         &task_executor, &_gc_timer, _gc_tracer.gc_id());
  2387         &task_executor, &_gc_timer);
  2386     } else {
  2388     } else {
  2387       stats = ref_processor()->process_discovered_references(
  2389       stats = ref_processor()->process_discovered_references(
  2388         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
  2390         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
  2389         &_gc_timer, _gc_tracer.gc_id());
  2391         &_gc_timer);
  2390     }
  2392     }
  2391 
  2393 
  2392     gc_tracer->report_gc_reference_stats(stats);
  2394     gc_tracer->report_gc_reference_stats(stats);
  2393   }
  2395   }
  2394 
  2396 
  2395   GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2397   GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
  2396 
  2398 
  2397   // This is the point where the entire marking should have completed.
  2399   // This is the point where the entire marking should have completed.
  2398   assert(cm->marking_stacks_empty(), "Marking should have completed");
  2400   assert(cm->marking_stacks_empty(), "Marking should have completed");
  2399 
  2401 
  2400   // Follow system dictionary roots and unload classes.
  2402   // Follow system dictionary roots and unload classes.
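The reference-processing hunk makes the same trim to ReferenceProcessor::process_discovered_references(): both the multi-threaded branch (RefProcTaskExecutor) and the single-threaded branch (NULL executor) still pass &_gc_timer but no longer pass _gc_tracer.gc_id(). A hedged, self-contained sketch of the surrounding control flow, stats returned from either branch and handed to the tracer, using invented placeholder types:

    #include <cstdio>

    struct ReferenceStatsSketch { unsigned discovered = 0; };   // placeholder for ReferenceProcessorStats

    // Placeholder for process_discovered_references(); after this changeset the
    // real call takes the timer but no explicit gc_id argument.
    static ReferenceStatsSketch process_refs(bool mt) {
      std::printf("processing references, mt=%d\n", mt ? 1 : 0);
      return ReferenceStatsSketch{};
    }

    static void report_gc_reference_stats(const ReferenceStatsSketch& s) {
      std::printf("discovered=%u\n", s.discovered);
    }

    int main() {
      bool processing_is_mt = true;                  // like ref_processor()->processing_is_mt()
      ReferenceStatsSketch stats = process_refs(processing_is_mt);
      report_gc_reference_stats(stats);              // like gc_tracer->report_gc_reference_stats(stats)
    }
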
  2421 };
  2423 };
  2422 static PSAlwaysTrueClosure always_true;
  2424 static PSAlwaysTrueClosure always_true;
  2423 
  2425 
  2424 void PSParallelCompact::adjust_roots() {
  2426 void PSParallelCompact::adjust_roots() {
  2425   // Adjust the pointers to reflect the new locations
  2427   // Adjust the pointers to reflect the new locations
  2426   GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2428   GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
  2427 
  2429 
  2428   // Need new claim bits when tracing through and adjusting pointers.
  2430   // Need new claim bits when tracing through and adjusting pointers.
  2429   ClassLoaderDataGraph::clear_claimed_marks();
  2431   ClassLoaderDataGraph::clear_claimed_marks();
  2430 
  2432 
  2431   // General strong roots.
  2433   // General strong roots.
  2457 }
  2459 }
  2458 
  2460 
  2459 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
  2461 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
  2460                                                       uint parallel_gc_threads)
  2462                                                       uint parallel_gc_threads)
  2461 {
  2463 {
  2462   GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2464   GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
  2463 
  2465 
  2464   // Find the threads that are active
  2466   // Find the threads that are active
  2465   unsigned int which = 0;
  2467   unsigned int which = 0;
  2466 
  2468 
  2467   const uint task_count = MAX2(parallel_gc_threads, 1U);
  2469   const uint task_count = MAX2(parallel_gc_threads, 1U);
  2531 
  2533 
  2532 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
  2534 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
  2533 
  2535 
  2534 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
  2536 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
  2535                                                     uint parallel_gc_threads) {
  2537                                                     uint parallel_gc_threads) {
  2536   GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2538   GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
  2537 
  2539 
  2538   ParallelCompactData& sd = PSParallelCompact::summary_data();
  2540   ParallelCompactData& sd = PSParallelCompact::summary_data();
  2539 
  2541 
  2540   // Iterate over all the spaces adding tasks for updating
  2542   // Iterate over all the spaces adding tasks for updating
  2541   // regions in the dense prefix.  Assume that 1 gc thread
  2543   // regions in the dense prefix.  Assume that 1 gc thread
  2613 
  2615 
  2614 void PSParallelCompact::enqueue_region_stealing_tasks(
  2616 void PSParallelCompact::enqueue_region_stealing_tasks(
  2615                                      GCTaskQueue* q,
  2617                                      GCTaskQueue* q,
  2616                                      ParallelTaskTerminator* terminator_ptr,
  2618                                      ParallelTaskTerminator* terminator_ptr,
  2617                                      uint parallel_gc_threads) {
  2619                                      uint parallel_gc_threads) {
  2618   GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2620   GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
  2619 
  2621 
  2620   // Once a thread has drained it's stack, it should try to steal regions from
  2622   // Once a thread has drained it's stack, it should try to steal regions from
  2621   // other threads.
  2623   // other threads.
  2622   if (parallel_gc_threads > 1) {
  2624   if (parallel_gc_threads > 1) {
  2623     for (uint j = 0; j < parallel_gc_threads; j++) {
  2625     for (uint j = 0; j < parallel_gc_threads; j++) {
  2661 }
  2663 }
  2662 #endif // #ifdef ASSERT
  2664 #endif // #ifdef ASSERT
  2663 
  2665 
  2664 void PSParallelCompact::compact() {
  2666 void PSParallelCompact::compact() {
  2665   // trace("5");
  2667   // trace("5");
  2666   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2668   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
  2667 
  2669 
  2668   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  2670   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  2669   PSOldGen* old_gen = heap->old_gen();
  2671   PSOldGen* old_gen = heap->old_gen();
  2670   old_gen->start_array()->reset();
  2672   old_gen->start_array()->reset();
  2671   uint parallel_gc_threads = heap->gc_task_manager()->workers();
  2673   uint parallel_gc_threads = heap->gc_task_manager()->workers();
  2677   enqueue_region_draining_tasks(q, active_gc_threads);
  2679   enqueue_region_draining_tasks(q, active_gc_threads);
  2678   enqueue_dense_prefix_tasks(q, active_gc_threads);
  2680   enqueue_dense_prefix_tasks(q, active_gc_threads);
  2679   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
  2681   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
  2680 
  2682 
  2681   {
  2683   {
  2682     GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2684     GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
  2683 
  2685 
  2684     gc_task_manager()->execute_and_wait(q);
  2686     gc_task_manager()->execute_and_wait(q);
  2685 
  2687 
  2686 #ifdef  ASSERT
  2688 #ifdef  ASSERT
  2687     // Verify that all regions have been processed before the deferred updates.
  2689     // Verify that all regions have been processed before the deferred updates.
  2691 #endif
  2693 #endif
  2692   }
  2694   }
  2693 
  2695 
  2694   {
  2696   {
  2695     // Update the deferred objects, if any.  Any compaction manager can be used.
  2697     // Update the deferred objects, if any.  Any compaction manager can be used.
  2696     GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
  2698     GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
  2697     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
  2699     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
  2698     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
  2700     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
  2699       update_deferred_objects(cm, SpaceId(id));
  2701       update_deferred_objects(cm, SpaceId(id));
  2700     }
  2702     }
  2701   }
  2703   }
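compact() shows the same pattern with nested scopes: an outer "compaction phase" timer wrapping inner "par compact" and "deferred updates" timers, none of which takes an id any more. A tiny illustrative sketch of that nesting (made-up type, not HotSpot code):

    #include <cstdio>

    // Scoped phase marker; construction/destruction give the nested begin/end pairs.
    struct ScopedPhaseSketch {
      const char* _name;
      explicit ScopedPhaseSketch(const char* name) : _name(name) {
        std::printf("begin %s\n", _name);
      }
      ~ScopedPhaseSketch() { std::printf("end   %s\n", _name); }
    };

    int main() {
      ScopedPhaseSketch tm("compaction phase");        // outer scope, as in compact()
      {
        ScopedPhaseSketch tm_pc("par compact");        // execute_and_wait(q) runs here
      }
      {
        ScopedPhaseSketch tm_du("deferred updates");   // update_deferred_objects loop
      }
    }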