hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
changeset 6058 9c9aec6ab47d
parent 5891 59044ec3fca3
child 6068 80ef41e75a2d
--- 6057:e660446e0804
+++ 6058:9c9aec6ab47d
@@ -807,25 +807,22 @@
     _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
                                          HeapRegion::RebuildRSClaimValue);
   }
 };
 
-void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
+void G1CollectedHeap::do_collection(bool explicit_gc,
+                                    bool clear_all_soft_refs,
                                     size_t word_size) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
 
 
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
 
-  if (full && DisableExplicitGC) {
-    return;
-  }
-
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
 
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                            collector_policy()->should_clear_all_soft_refs();
@@ -835,13 +832,15 @@
 
   {
     IsGCActiveMark x;
 
     // Timing
+    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
+    assert(!system_gc || explicit_gc, "invariant");
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                 PrintGC, true, gclog_or_tty);
 
     TraceMemoryManagerStats tms(true /* fullGC */);
 
     double start = os::elapsedTime();
@@ -942,11 +941,11 @@
     // sets. We will also reset the GC time stamps of the regions.
     PostMCRemSetClearClosure rs_clear(mr_bs());
     heap_region_iterate(&rs_clear);
 
     // Resize the heap if necessary.
-    resize_if_necessary_after_full_collection(full ? 0 : word_size);
+    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
       _cg1r->clear_hot_cache();
     }
@@ -1007,17 +1006,22 @@
     // entire heap tagged as young.
     assert( check_young_list_empty(true /* check_heap */),
             "young list should be empty at this point");
   }
 
+  // Update the number of full collections that have been completed.
+  increment_full_collections_completed(false /* outer */);
+
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
 }
 
 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_collection(true, clear_all_soft_refs, 0);
+  do_collection(true,                /* explicit_gc */
+                clear_all_soft_refs,
+                0                    /* word_size */);
 }
 
 // This code is mostly copied from TenuredGeneration.
 void
 G1CollectedHeap::
@@ -1329,10 +1333,11 @@
   _unclean_region_list(),
   _unclean_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
+  _full_collections_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL) {
   _g1h = this; // To catch bugs.
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
@@ -1687,10 +1692,55 @@
     return 0;
   }
   return car->free();
 }
 
+bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  return
+    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+}
+
+void G1CollectedHeap::increment_full_collections_completed(bool outer) {
+  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+
+  // We have already incremented _total_full_collections at the start
+  // of the GC, so total_full_collections() represents how many full
+  // collections have been started.
+  unsigned int full_collections_started = total_full_collections();
+
+  // Given that this method is called at the end of a Full GC or of a
+  // concurrent cycle, and those can be nested (i.e., a Full GC can
+  // interrupt a concurrent cycle), the number of full collections
+  // completed should be either one (in the case where there was no
+  // nesting) or two (when a Full GC interrupted a concurrent cycle)
+  // behind the number of full collections started.
+
+  // This is the case for the inner caller, i.e. a Full GC.
+  assert(outer ||
+         (full_collections_started == _full_collections_completed + 1) ||
+         (full_collections_started == _full_collections_completed + 2),
+         err_msg("for inner caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  // This is the case for the outer caller, i.e. the concurrent cycle.
+  assert(!outer ||
+         (full_collections_started == _full_collections_completed + 1),
+         err_msg("for outer caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  _full_collections_completed += 1;
+
+  // This notify_all() will ensure that a thread that called
+  // System.gc() (with ExplicitGCInvokesConcurrent set or not) and
+  // is waiting for a full GC to finish will be woken up. It is
+  // waiting in VM_G1IncCollectionPause::doit_epilogue().
+  FullGCCount_lock->notify_all();
+}
+
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
   GCCauseSetter gcs(this, cause);
   switch (cause) {
@@ -1707,29 +1757,45 @@
 
 void G1CollectedHeap::collect(GCCause::Cause cause) {
   // The caller doesn't have the Heap_lock
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  int gc_count_before;
+  unsigned int gc_count_before;
+  unsigned int full_gc_count_before;
   {
     MutexLocker ml(Heap_lock);
     // Read the GC count while holding the Heap_lock
     gc_count_before = SharedHeap::heap()->total_collections();
+    full_gc_count_before = SharedHeap::heap()->total_full_collections();
 
     // Don't want to do a GC until cleanup is completed.
     wait_for_cleanup_complete();
-  } // We give up heap lock; VMThread::execute gets it back below
-  switch (cause) {
-    case GCCause::_scavenge_alot: {
-      // Do an incremental pause, which might sometimes be abandoned.
-      VM_G1IncCollectionPause op(gc_count_before, cause);
+
+    // We give up heap lock; VMThread::execute gets it back below
+  }
+
+  if (should_do_concurrent_full_gc(cause)) {
+    // Schedule an initial-mark evacuation pause that will start a
+    // concurrent cycle.
+    VM_G1IncCollectionPause op(gc_count_before,
+                               true, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               cause);
+    VMThread::execute(&op);
+  } else {
+    if (cause == GCCause::_gc_locker
+        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+      // Schedule a standard evacuation pause.
+      VM_G1IncCollectionPause op(gc_count_before,
+                                 false, /* should_initiate_conc_mark */
+                                 g1_policy()->max_pause_time_ms(),
+                                 cause);
       VMThread::execute(&op);
-      break;
-    }
-    default: {
-      // In all other cases, we currently do a full gc.
-      VM_G1CollectFull op(gc_count_before, cause);
+    } else {
+      // Schedule a Full GC.
+      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
       VMThread::execute(&op);
     }
   }
 }
 
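
The rewritten collect() now has three possible outcomes instead of two. As a compact reading of the new control flow, here is a sketch of the cause-to-operation mapping; the names are invented for illustration, and the two boolean parameters stand in for the VM flags of the same name.

    enum class Cause { gc_locker, java_lang_system_gc, scavenge_alot, other };
    enum class Op    { initial_mark_pause, evacuation_pause, full_gc };

    // Mirrors the dispatch in the new G1CollectedHeap::collect().
    Op choose_op(Cause cause,
                 bool GCLockerInvokesConcurrent,
                 bool ExplicitGCInvokesConcurrent,
                 bool debug_build) {
      const bool concurrent =
          (cause == Cause::gc_locker           && GCLockerInvokesConcurrent) ||
          (cause == Cause::java_lang_system_gc && ExplicitGCInvokesConcurrent);
      if (concurrent) {
        return Op::initial_mark_pause;   // evacuation pause that starts a cycle
      }
      if (cause == Cause::gc_locker ||
          (debug_build && cause == Cause::scavenge_alot)) {
        return Op::evacuation_pause;     // ordinary incremental pause
      }
      return Op::full_gc;                // e.g. System.gc() by default
    }
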
@@ -1987,10 +2053,15 @@
   }
 }
 
 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                   HeapRegionClosure *cl) {
+  if (r == NULL) {
+    // The CSet is empty so there's nothing to do.
+    return;
+  }
+
   assert(r->in_collection_set(),
          "Start region must be a member of the collection set.");
   HeapRegion* cur = r;
   while (cur != NULL) {
     HeapRegion* next = cur->next_in_collection_set();
@@ -2479,23 +2550,28 @@
                         "derived pointer present"));
   // always_do_update_barrier = true;
 }
 
 void G1CollectedHeap::do_collection_pause() {
+  assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
+
   // Read the GC count while holding the Heap_lock
   // we need to do this _before_ wait_for_cleanup_complete(), to
   // ensure that we do not give up the heap lock and potentially
   // pick up the wrong count
-  int gc_count_before = SharedHeap::heap()->total_collections();
+  unsigned int gc_count_before = SharedHeap::heap()->total_collections();
 
   // Don't want to do a GC pause while cleanup is being completed!
   wait_for_cleanup_complete();
 
   g1_policy()->record_stop_world_start();
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1IncCollectionPause op(gc_count_before);
+    VM_G1IncCollectionPause op(gc_count_before,
+                               false, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               GCCause::_g1_inc_collection_pause);
     VMThread::execute(&op);
   }
 }
 
 void
@@ -2610,11 +2686,11 @@
     return false;
   }
 };
 
 void
-G1CollectedHeap::do_collection_pause_at_safepoint() {
+G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
 
   if (PrintHeapAtGC) {
@@ -2635,12 +2711,16 @@
       if (g1_policy()->full_young_gcs())
         strcat(verbose_str, "(young)");
       else
         strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->during_initial_mark_pause())
+    if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
+      // We are about to start a marking cycle, so we increment the
+      // full collection counter.
+      increment_total_full_collections();
+    }
 
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
     // is messy if we do.
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
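
Read together with the FullGCCounters sketch above: the initial-mark pause added here plays the role of note_started(), and the matching completion arrives either from the end of the concurrent cycle (the outer caller) or from a Full GC that interrupts that cycle (the inner caller). A hypothetical pairing of the call sites, reusing the earlier sketch:

    FullGCCounters counters;  // from the sketch above

    void initial_mark_pause() {
      counters.note_started();  // models increment_total_full_collections()
      // ... evacuate, then kick off concurrent marking ...
    }

    void concurrent_cycle_end() {
      counters.note_completed(true /* outer */);
    }

    void full_gc() {
      counters.note_started();
      // ... collect the whole heap ...
      counters.note_completed(false /* outer */);
    }
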
@@ -2659,11 +2739,10 @@
     if (g1_policy()->in_young_gc_mode()) {
       assert(check_young_list_well_formed(),
              "young list should be well formed");
     }
 
-    bool abandoned = false;
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
 
       gc_prologue(false);
       increment_total_collections(false /* full gc */);
@@ -2741,11 +2820,11 @@
       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
       // Now choose the CS. We may abandon a pause if we find no
       // region that will fit in the MMU pause.
-      bool abandoned = g1_policy()->choose_collection_set();
+      bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms);
 
       // Nothing to do if we were unable to choose a collection set.
       if (!abandoned) {
 #if G1_REM_SET_LOGGING
         gclog_or_tty->print_cr("\nAfter pause, heap:");