hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
changeset 10670 4ea0e7d2ffbc
parent 10664 702062c83bd7
child 10671 431ff8629f97
comparison: 10669:cfa6efbbc1b3 (base) -> 10670:4ea0e7d2ffbc
@@ -40,10 +40,11 @@
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/generationSpec.hpp"
+#include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/aprofiler.hpp"
 #include "runtime/vmThread.hpp"
 
@@ -1242,19 +1243,15 @@
     }
     pre_full_gc_dump();
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    // We want to discover references, but not process them yet.
-    // This mode is disabled in
-    // instanceRefKlass::process_discovered_references if the
-    // generation does some collection work, or
-    // instanceRefKlass::enqueue_discovered_references if the
-    // generation returns without doing any work.
-    ref_processor()->disable_discovery();
-    ref_processor()->abandon_partial_discovery();
-    ref_processor()->verify_no_references_recorded();
+    // Disable discovery and empty the discovered lists
+    // for the CM ref processor.
+    ref_processor_cm()->disable_discovery();
+    ref_processor_cm()->abandon_partial_discovery();
+    ref_processor_cm()->verify_no_references_recorded();
 
     // Abandon current iterations of concurrent marking and concurrent
     // refinement, if any are in progress.
     concurrent_mark()->abort();
 
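The hunk above establishes the full-GC precondition for the concurrent-marking (CM) reference processor: discovery switched off and the discovered lists emptied before the collection repurposes that state. A minimal standalone sketch of that contract, using a hypothetical MockRefProcessor rather than HotSpot's ReferenceProcessor:

    // Hypothetical stand-in for ReferenceProcessor; it models only the
    // calls made in the hunk above.
    #include <cassert>
    #include <vector>

    struct MockRefProcessor {
      bool discovery_enabled = false;
      std::vector<void*> discovered;   // stand-in for the discovered lists

      void disable_discovery()             { discovery_enabled = false; }
      void abandon_partial_discovery()     { discovered.clear(); }
      void verify_no_references_recorded() { assert(discovered.empty()); }
    };

    // Full-GC prologue pattern from the hunk: stop discovering, drop anything
    // discovered so far, then check that the lists really are empty.
    void full_gc_prologue(MockRefProcessor& cm_rp) {
      cm_rp.disable_discovery();
      cm_rp.abandon_partial_discovery();
      cm_rp.verify_no_references_recorded();
    }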
@@ -1278,35 +1275,37 @@
     g1_policy()->stop_incremental_cset_building();
 
     empty_young_list();
     g1_policy()->set_full_young_gcs(true);
 
-    // See the comment in G1CollectedHeap::ref_processing_init() about
+    // See the comments in g1CollectedHeap.hpp and
+    // G1CollectedHeap::ref_processing_init() about
     // how reference processing currently works in G1.
 
-    // Temporarily make reference _discovery_ single threaded (non-MT).
-    ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
+    // Temporarily make discovery by the STW ref processor single threaded (non-MT).
+    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
 
-    // Temporarily make refs discovery atomic
-    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
-
-    // Temporarily clear _is_alive_non_header
-    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
+    // Temporarily clear the STW ref processor's _is_alive_non_header field.
+    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
 
-    ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(do_clear_all_soft_refs);
+    ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+    ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
+
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
+      G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
     }
+
     assert(free_regions() == 0, "we should not have added any free regions");
     rebuild_region_lists();
 
     _summary_bytes_used = recalculate_used();
 
-    ref_processor()->enqueue_discovered_references();
+    // Enqueue any discovered reference objects that have
+    // not been removed from the discovered lists.
+    ref_processor_stw()->enqueue_discovered_references();
 
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
     MemoryService::track_memory_usage();
 
@@ -1317,11 +1316,20 @@
       Universe::verify(/* allow dirty */ false,
                        /* silent      */ false,
                        /* option      */ VerifyOption_G1UsePrevMarking);
 
     }
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
+
+    assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+    ref_processor_stw()->verify_no_references_recorded();
+
+    // Note: since we've just done a full GC, concurrent
+    // marking is no longer active. Therefore we need not
+    // re-enable reference discovery for the CM ref processor.
+    // That will be done at the start of the next marking cycle.
+    assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
+    ref_processor_cm()->verify_no_references_recorded();
 
     reset_gc_time_stamp();
     // Since everything potentially moved, we will clear all remembered
     // sets, and clear all cards.  Later we will rebuild remembered
     // sets. We will also reset the GC time stamps of the regions.
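The ReferenceProcessorMTDiscoveryMutator and ReferenceProcessorIsAliveMutator objects in the hunk above are scoped save/set/restore helpers. A hedged sketch of that general pattern (the field being mutated is illustrative, not the real ReferenceProcessor layout):

    // Generic save/set/restore guard: sets *field to a new value for the
    // lifetime of the guard and restores the previous value on scope exit.
    template <typename T>
    class ScopedFieldMutator {
      T* _field;
      T  _saved;
    public:
      ScopedFieldMutator(T* field, T value) : _field(field), _saved(*field) {
        *_field = value;
      }
      ~ScopedFieldMutator() { *_field = _saved; }
    };

    // Usage, mirroring the full-GC prologue above (illustrative field name):
    //   ScopedFieldMutator<bool> disc_ser(&rp->_discovery_is_mt, false);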
@@ -1770,12 +1778,14 @@
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   SharedHeap(policy_),
   _g1_policy(policy_),
   _dirty_card_queue_set(false),
   _into_cset_dirty_card_queue_set(false),
-  _is_alive_closure(this),
-  _ref_processor(NULL),
+  _is_alive_closure_cm(this),
+  _is_alive_closure_stw(this),
+  _ref_processor_cm(NULL),
+  _ref_processor_stw(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
   _evac_failure_scan_stack(NULL) ,
   _mark_in_progress(false),
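The new initializer-list entries imply matching declarations in g1CollectedHeap.hpp, roughly as sketched below. G1STWIsAliveClosure is defined later in this changeset; the exact type of the CM closure is an assumption here, not quoted from the header:

    // Assumed shape of the fields behind the initializer list above:
    //   G1CMIsAliveClosure  _is_alive_closure_cm;   // used by _ref_processor_cm
    //   G1STWIsAliveClosure _is_alive_closure_stw;  // used by _ref_processor_stw
    //   ReferenceProcessor* _ref_processor_cm;      // concurrent marking discovery
    //   ReferenceProcessor* _ref_processor_stw;     // STW pause discovery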
@@ -2065,38 +2075,85 @@
 }
 
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
-  // * There is only one reference processor instance that
-  //   'spans' the entire heap. It is created by the code
-  //   below.
-  // * Reference discovery is not enabled during an incremental
-  //   pause (see 6484982).
-  // * Discoverered refs are not enqueued nor are they processed
-  //   during an incremental pause (see 6484982).
-  // * Reference discovery is enabled at initial marking.
-  // * Reference discovery is disabled and the discovered
-  //   references processed etc during remarking.
-  // * Reference discovery is MT (see below).
-  // * Reference discovery requires a barrier (see below).
-  // * Reference processing is currently not MT (see 6608385).
-  // * A full GC enables (non-MT) reference discovery and
-  //   processes any discovered references.
+  // * There are two reference processor instances. One is
+  //   used to record and process discovered references
+  //   during concurrent marking; the other is used to
+  //   record and process references during STW pauses
+  //   (both full and incremental).
+  // * Both ref processors need to 'span' the entire heap as
+  //   the regions in the collection set may be dotted around.
+  //
+  // * For the concurrent marking ref processor:
+  //   * Reference discovery is enabled at initial marking.
+  //   * Reference discovery is disabled and the discovered
+  //     references processed etc during remarking.
+  //   * Reference discovery is MT (see below).
+  //   * Reference discovery requires a barrier (see below).
+  //   * Reference processing may or may not be MT
+  //     (depending on the value of ParallelRefProcEnabled
+  //     and ParallelGCThreads).
+  //   * A full GC disables reference discovery by the CM
+  //     ref processor and abandons any entries on its
+  //     discovered lists.
+  //
+  // * For the STW processor:
+  //   * Non-MT discovery is enabled at the start of a full GC.
+  //   * Processing and enqueueing during a full GC is non-MT.
+  //   * During a full GC, references are processed after marking.
+  //
+  //   * Discovery (may or may not be MT) is enabled at the start
+  //     of an incremental evacuation pause.
+  //   * References are processed near the end of a STW evacuation pause.
+  //   * For both types of GC:
+  //     * Discovery is atomic - i.e. not concurrent.
+  //     * Reference discovery will not need a barrier.
 
   SharedHeap::ref_processing_init();
   MemRegion mr = reserved_region();
-  _ref_processor =
+
+  // Concurrent Mark ref processor
+  _ref_processor_cm =
     new ReferenceProcessor(mr,    // span
-                           ParallelRefProcEnabled && (ParallelGCThreads > 1),    // mt processing
-                           (int) ParallelGCThreads,   // degree of mt processing
-                           ParallelGCThreads > 1 || ConcGCThreads > 1,  // mt discovery
-                           (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
-                           false,                     // Reference discovery is not atomic
-                           &_is_alive_closure,        // is alive closure for efficiency
-                           true);                     // Setting next fields of discovered
-                                                      // lists requires a barrier.
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
+                                // mt processing
+                           (int) ParallelGCThreads,
+                                // degree of mt processing
+                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
+                                // mt discovery
+                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
+                                // degree of mt discovery
+                           false,
+                                // Reference discovery is not atomic
+                           &_is_alive_closure_cm,
+                                // is alive closure
+                                // (for efficiency/performance)
+                           true);
+                                // Setting next fields of discovered
+                                // lists requires a barrier.
+
+  // STW ref processor
+  _ref_processor_stw =
+    new ReferenceProcessor(mr,    // span
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
+                                // mt processing
+                           MAX2((int)ParallelGCThreads, 1),
+                                // degree of mt processing
+                           (ParallelGCThreads > 1),
+                                // mt discovery
+                           MAX2((int)ParallelGCThreads, 1),
+                                // degree of mt discovery
+                           true,
+                                // Reference discovery is atomic
+                           &_is_alive_closure_stw,
+                                // is alive closure
+                                // (for efficiency/performance)
+                           false);
+                                // Setting next fields of discovered
+                                // lists does not require a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
   return _g1_committed.byte_size();
 }
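A worked example of the MT-degree arithmetic used by ref_processing_init() above. The flag values are assumptions chosen for illustration, not defaults this changeset sets:

    #include <algorithm>
    #include <cstdio>

    int main() {
      unsigned ParallelGCThreads      = 4;      // assumed example value
      unsigned ConcGCThreads          = 1;      // assumed example value
      bool     ParallelRefProcEnabled = false;  // assumed example value

      // CM ref processor: MT discovery if either thread count exceeds 1.
      bool     cm_mt_proc  = ParallelRefProcEnabled && (ParallelGCThreads > 1);
      bool     cm_mt_disc  = (ParallelGCThreads > 1) || (ConcGCThreads > 1);
      unsigned cm_disc_deg = std::max(ParallelGCThreads, ConcGCThreads);

      // STW ref processor: MT discovery only with parallel GC workers.
      bool     stw_mt_disc = (ParallelGCThreads > 1);
      unsigned stw_deg     = std::max(ParallelGCThreads, 1u);

      std::printf("CM : mt_proc=%d mt_disc=%d degree=%u\n",
                  cm_mt_proc, cm_mt_disc, cm_disc_deg);
      std::printf("STW: mt_disc=%d degree=%u\n", stw_mt_disc, stw_deg);
      return 0;
    }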
@@ -3115,10 +3172,14 @@
   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
   // is set.
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                         "derived pointer present"));
   // always_do_update_barrier = true;
+
+  // We have just completed a GC. Update the soft reference
+  // policy with the new heap occupancy
+  Universe::update_heap_info_at_gc();
 }
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                unsigned int gc_count_before,
                                                bool* succeeded) {
@@ -3352,234 +3413,245 @@
 
       }
 
      COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-      // Please see comment in G1CollectedHeap::ref_processing_init()
-      // to see how reference processing currently works in G1.
-      //
-      // We want to turn off ref discovery, if necessary, and turn it back on
-      // on again later if we do. XXX Dubious: why is discovery disabled?
-      bool was_enabled = ref_processor()->discovery_enabled();
-      if (was_enabled) ref_processor()->disable_discovery();
+      // Please see comment in g1CollectedHeap.hpp and
+      // G1CollectedHeap::ref_processing_init() to see how
+      // reference processing currently works in G1.
+
+      // Enable discovery in the STW reference processor
+      ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
+                                            true /*verify_no_refs*/);
 
-      // Forget the current alloc region (we might even choose it to be part
-      // of the collection set!).
-      release_mutator_alloc_region();
-
-      // We should call this after we retire the mutator alloc
-      // region(s) so that all the ALLOC / RETIRE events are generated
-      // before the start GC event.
-      _hr_printer.start_gc(false /* full */, (size_t) total_collections());
-
-      // The elapsed time induced by the start time below deliberately elides
-      // the possible verification above.
-      double start_time_sec = os::elapsedTime();
-      size_t start_used_bytes = used();
-
-#if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
-#endif // YOUNG_LIST_VERBOSE
-
-      g1_policy()->record_collection_pause_start(start_time_sec,
-                                                 start_used_bytes);
-
-#if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
-      _young_list->print();
-#endif // YOUNG_LIST_VERBOSE
-
-      if (g1_policy()->during_initial_mark_pause()) {
-        concurrent_mark()->checkpointRootsInitialPre();
-      }
-      perm_gen()->save_marks();
-
-      // We must do this before any possible evacuation that should propagate
-      // marks.
-      if (mark_in_progress()) {
-        double start_time_sec = os::elapsedTime();
-
-        _cm->drainAllSATBBuffers();
-        double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
-        g1_policy()->record_satb_drain_time(finish_mark_ms);
-      }
-      // Record the number of elements currently on the mark stack, so we
-      // only iterate over these.  (Since evacuation may add to the mark
-      // stack, doing more exposes race conditions.)  If no mark is in
-      // progress, this will be zero.
-      _cm->set_oops_do_bound();
-
-      if (mark_in_progress()) {
-        concurrent_mark()->newCSet();
-      }
-
-#if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
-#endif // YOUNG_LIST_VERBOSE
-
-      g1_policy()->choose_collection_set(target_pause_time_ms);
-
-      if (_hr_printer.is_active()) {
-        HeapRegion* hr = g1_policy()->collection_set();
-        while (hr != NULL) {
-          G1HRPrinter::RegionType type;
-          if (!hr->is_young()) {
-            type = G1HRPrinter::Old;
-          } else if (hr->is_survivor()) {
-            type = G1HRPrinter::Survivor;
-          } else {
-            type = G1HRPrinter::Eden;
-          }
-          _hr_printer.cset(hr);
-          hr = hr->next_in_collection_set();
-        }
-      }
-
-      // We have chosen the complete collection set. If marking is
-      // active then, we clear the region fields of any of the
-      // concurrent marking tasks whose region fields point into
-      // the collection set as these values will become stale. This
-      // will cause the owning marking threads to claim a new region
-      // when marking restarts.
-      if (mark_in_progress()) {
-        concurrent_mark()->reset_active_task_region_fields_in_cset();
-      }
-
-#ifdef ASSERT
-      VerifyCSetClosure cl;
-      collection_set_iterate(&cl);
-#endif // ASSERT
-
-      setup_surviving_young_words();
-
-      // Initialize the GC alloc regions.
-      init_gc_alloc_regions();
-
-      // Actually do the work...
-      evacuate_collection_set();
-
-      free_collection_set(g1_policy()->collection_set());
-      g1_policy()->clear_collection_set();
-
-      cleanup_surviving_young_words();
-
-      // Start a new incremental collection set for the next pause.
-      g1_policy()->start_incremental_cset_building();
-
-      // Clear the _cset_fast_test bitmap in anticipation of adding
-      // regions to the incremental collection set for the next
-      // evacuation pause.
-      clear_cset_fast_test();
-
-      _young_list->reset_sampled_info();
-
-      // Don't check the whole heap at this point as the
-      // GC alloc regions from this pause have been tagged
-      // as survivors and moved on to the survivor list.
-      // Survivor regions will fail the !is_young() check.
-      assert(check_young_list_empty(false /* check_heap */),
-        "young list should be empty");
-
-#if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
-      _young_list->print();
-#endif // YOUNG_LIST_VERBOSE
-
-      g1_policy()->record_survivor_regions(_young_list->survivor_length(),
-        _young_list->first_survivor_region(),
-        _young_list->last_survivor_region());
-
-      _young_list->reset_auxilary_lists();
-
-      if (evacuation_failed()) {
-        _summary_bytes_used = recalculate_used();
-      } else {
-        // The "used" of the the collection set have already been subtracted
-        // when they were freed.  Add in the bytes evacuated.
-        _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
-      }
-
-      if (g1_policy()->during_initial_mark_pause()) {
-        concurrent_mark()->checkpointRootsInitialPost();
-        set_marking_started();
-        // CAUTION: after the doConcurrentMark() call below,
-        // the concurrent marking thread(s) could be running
-        // concurrently with us. Make sure that anything after
-        // this point does not assume that we are the only GC thread
-        // running. Note: of course, the actual marking work will
-        // not start until the safepoint itself is released in
-        // ConcurrentGCThread::safepoint_desynchronize().
-        doConcurrentMark();
-      }
-
-      allocate_dummy_regions();
-
-#if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
-#endif // YOUNG_LIST_VERBOSE
-
-      init_mutator_alloc_region();
-
-      {
-        size_t expand_bytes = g1_policy()->expansion_amount();
-        if (expand_bytes > 0) {
-          size_t bytes_before = capacity();
-          if (!expand(expand_bytes)) {
-            // We failed to expand the heap so let's verify that
-            // committed/uncommitted amount match the backing store
-            assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
-            assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
-          }
-        }
-      }
-
-      double end_time_sec = os::elapsedTime();
-      double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
-      g1_policy()->record_pause_time_ms(pause_time_ms);
-      g1_policy()->record_collection_pause_end();
-
-      MemoryService::track_memory_usage();
-
-      // In prepare_for_verify() below we'll need to scan the deferred
-      // update buffers to bring the RSets up-to-date if
-      // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
-      // the update buffers we'll probably need to scan cards on the
-      // regions we just allocated to (i.e., the GC alloc
-      // regions). However, during the last GC we called
-      // set_saved_mark() on all the GC alloc regions, so card
-      // scanning might skip the [saved_mark_word()...top()] area of
-      // those regions (i.e., the area we allocated objects into
-      // during the last GC). But it shouldn't. Given that
-      // saved_mark_word() is conditional on whether the GC time stamp
-      // on the region is current or not, by incrementing the GC time
-      // stamp here we invalidate all the GC time stamps on all the
-      // regions and saved_mark_word() will simply return top() for
-      // all the regions. This is a nicer way of ensuring this rather
-      // than iterating over the regions and fixing them. In fact, the
-      // GC time stamp increment here also ensures that
-      // saved_mark_word() will return top() between pauses, i.e.,
-      // during concurrent refinement. So we don't need the
-      // is_gc_active() check to decided which top to use when
-      // scanning cards (see CR 7039627).
-      increment_gc_time_stamp();
-
-      if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
-        HandleMark hm;  // Discard invalid handles created during verification
-        gclog_or_tty->print(" VerifyAfterGC:");
-        prepare_for_verify();
-        Universe::verify(/* allow dirty */ true,
-                         /* silent      */ false,
-                         /* option      */ VerifyOption_G1UsePrevMarking);
-      }
-
-      if (was_enabled) ref_processor()->enable_discovery();
+      {
+        // We want to temporarily turn off discovery by the
+        // CM ref processor, if necessary, and turn it back
+        // on again later if we do. Using a scoped
+        // NoRefDiscovery object will do this.
+        NoRefDiscovery no_cm_discovery(ref_processor_cm());
+
+        // Forget the current alloc region (we might even choose it to be part
+        // of the collection set!).
+        release_mutator_alloc_region();
+
+        // We should call this after we retire the mutator alloc
+        // region(s) so that all the ALLOC / RETIRE events are generated
+        // before the start GC event.
+        _hr_printer.start_gc(false /* full */, (size_t) total_collections());
+
+        // The elapsed time induced by the start time below deliberately elides
+        // the possible verification above.
+        double start_time_sec = os::elapsedTime();
+        size_t start_used_bytes = used();
+
+#if YOUNG_LIST_VERBOSE
+        gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
+        g1_policy()->record_collection_pause_start(start_time_sec,
+                                                   start_used_bytes);
+
+#if YOUNG_LIST_VERBOSE
+        gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
+        _young_list->print();
+#endif // YOUNG_LIST_VERBOSE
+
+        if (g1_policy()->during_initial_mark_pause()) {
+          concurrent_mark()->checkpointRootsInitialPre();
+        }
+        perm_gen()->save_marks();
+
+        // We must do this before any possible evacuation that should propagate
+        // marks.
+        if (mark_in_progress()) {
+          double start_time_sec = os::elapsedTime();
+
+          _cm->drainAllSATBBuffers();
+          double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
+          g1_policy()->record_satb_drain_time(finish_mark_ms);
+        }
+        // Record the number of elements currently on the mark stack, so we
+        // only iterate over these.  (Since evacuation may add to the mark
+        // stack, doing more exposes race conditions.)  If no mark is in
+        // progress, this will be zero.
+        _cm->set_oops_do_bound();
+
+        if (mark_in_progress()) {
+          concurrent_mark()->newCSet();
+        }
+
+#if YOUNG_LIST_VERBOSE
+        gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
+        g1_policy()->choose_collection_set(target_pause_time_ms);
+
+        if (_hr_printer.is_active()) {
+          HeapRegion* hr = g1_policy()->collection_set();
+          while (hr != NULL) {
+            G1HRPrinter::RegionType type;
+            if (!hr->is_young()) {
+              type = G1HRPrinter::Old;
+            } else if (hr->is_survivor()) {
+              type = G1HRPrinter::Survivor;
+            } else {
+              type = G1HRPrinter::Eden;
+            }
+            _hr_printer.cset(hr);
+            hr = hr->next_in_collection_set();
+          }
+        }
+
+        // We have chosen the complete collection set. If marking is
+        // active then, we clear the region fields of any of the
+        // concurrent marking tasks whose region fields point into
+        // the collection set as these values will become stale. This
+        // will cause the owning marking threads to claim a new region
+        // when marking restarts.
+        if (mark_in_progress()) {
+          concurrent_mark()->reset_active_task_region_fields_in_cset();
+        }
+
+#ifdef ASSERT
+        VerifyCSetClosure cl;
+        collection_set_iterate(&cl);
+#endif // ASSERT
+
+        setup_surviving_young_words();
+
+        // Initialize the GC alloc regions.
+        init_gc_alloc_regions();
+
+        // Actually do the work...
+        evacuate_collection_set();
+
+        free_collection_set(g1_policy()->collection_set());
+        g1_policy()->clear_collection_set();
+
+        cleanup_surviving_young_words();
+
+        // Start a new incremental collection set for the next pause.
+        g1_policy()->start_incremental_cset_building();
+
+        // Clear the _cset_fast_test bitmap in anticipation of adding
+        // regions to the incremental collection set for the next
+        // evacuation pause.
+        clear_cset_fast_test();
+
+        _young_list->reset_sampled_info();
+
+        // Don't check the whole heap at this point as the
+        // GC alloc regions from this pause have been tagged
+        // as survivors and moved on to the survivor list.
+        // Survivor regions will fail the !is_young() check.
+        assert(check_young_list_empty(false /* check_heap */),
+          "young list should be empty");
+
+#if YOUNG_LIST_VERBOSE
+        gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
+        _young_list->print();
+#endif // YOUNG_LIST_VERBOSE
+
+        g1_policy()->record_survivor_regions(_young_list->survivor_length(),
+                                            _young_list->first_survivor_region(),
+                                            _young_list->last_survivor_region());
+
+        _young_list->reset_auxilary_lists();
+
+        if (evacuation_failed()) {
+          _summary_bytes_used = recalculate_used();
+        } else {
+          // The "used" of the collection set have already been subtracted
+          // when they were freed.  Add in the bytes evacuated.
+          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+        }
+
+        if (g1_policy()->during_initial_mark_pause()) {
+          concurrent_mark()->checkpointRootsInitialPost();
+          set_marking_started();
+          // CAUTION: after the doConcurrentMark() call below,
+          // the concurrent marking thread(s) could be running
+          // concurrently with us. Make sure that anything after
+          // this point does not assume that we are the only GC thread
+          // running. Note: of course, the actual marking work will
+          // not start until the safepoint itself is released in
+          // ConcurrentGCThread::safepoint_desynchronize().
+          doConcurrentMark();
+        }
+
+        allocate_dummy_regions();
+
+#if YOUNG_LIST_VERBOSE
+        gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
+        init_mutator_alloc_region();
+
+        {
+          size_t expand_bytes = g1_policy()->expansion_amount();
+          if (expand_bytes > 0) {
+            size_t bytes_before = capacity();
+            if (!expand(expand_bytes)) {
+              // We failed to expand the heap so let's verify that
+              // committed/uncommitted amount match the backing store
+              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
+              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+            }
+          }
+        }
+
+        double end_time_sec = os::elapsedTime();
+        double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
+        g1_policy()->record_pause_time_ms(pause_time_ms);
+        g1_policy()->record_collection_pause_end();
+
+        MemoryService::track_memory_usage();
+
+        // In prepare_for_verify() below we'll need to scan the deferred
+        // update buffers to bring the RSets up-to-date if
+        // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
+        // the update buffers we'll probably need to scan cards on the
+        // regions we just allocated to (i.e., the GC alloc
+        // regions). However, during the last GC we called
+        // set_saved_mark() on all the GC alloc regions, so card
+        // scanning might skip the [saved_mark_word()...top()] area of
+        // those regions (i.e., the area we allocated objects into
+        // during the last GC). But it shouldn't. Given that
+        // saved_mark_word() is conditional on whether the GC time stamp
+        // on the region is current or not, by incrementing the GC time
+        // stamp here we invalidate all the GC time stamps on all the
+        // regions and saved_mark_word() will simply return top() for
+        // all the regions. This is a nicer way of ensuring this rather
+        // than iterating over the regions and fixing them. In fact, the
+        // GC time stamp increment here also ensures that
+        // saved_mark_word() will return top() between pauses, i.e.,
+        // during concurrent refinement. So we don't need the
+        // is_gc_active() check to decide which top to use when
+        // scanning cards (see CR 7039627).
+        increment_gc_time_stamp();
+
+        if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
+          HandleMark hm;  // Discard invalid handles created during verification
+          gclog_or_tty->print(" VerifyAfterGC:");
+          prepare_for_verify();
+          Universe::verify(/* allow dirty */ true,
+                           /* silent      */ false,
+                           /* option      */ VerifyOption_G1UsePrevMarking);
+        }
+
+        assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+        ref_processor_stw()->verify_no_references_recorded();
+
+        // CM reference discovery will be re-enabled if necessary.
+      }
 
       {
         size_t expand_bytes = g1_policy()->expansion_amount();
         if (expand_bytes > 0) {
           size_t bytes_before = capacity();
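The pause above brackets its body with a scoped NoRefDiscovery object. A self-contained sketch of what such a guard plausibly does (remember whether discovery was on, turn it off, restore it on scope exit); the real class ships with ReferenceProcessor and may differ in detail:

    struct MockRefProcessor {
      bool discovery_enabled = false;
      void disable_discovery() { discovery_enabled = false; }
      void enable_discovery()  { discovery_enabled = true; }
    };

    class NoRefDiscoverySketch {
      MockRefProcessor* _rp;
      bool              _was_enabled;
    public:
      explicit NoRefDiscoverySketch(MockRefProcessor* rp)
        : _rp(rp), _was_enabled(rp->discovery_enabled) {
        if (_was_enabled) _rp->disable_discovery();
      }
      ~NoRefDiscoverySketch() {
        if (_was_enabled) _rp->enable_discovery();
      }
    };

Unlike the old was_enabled/if (was_enabled) pair it replaces, a guard like this cannot be bypassed by an early exit from the scope.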
@@ -3725,38 +3797,10 @@
          "Postcondition");
   assert(!_drain_in_progress, "Postcondition");
   delete _evac_failure_scan_stack;
   _evac_failure_scan_stack = NULL;
 }
 
-// *** Sequential G1 Evacuation
-
-class G1IsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_object(oop p) { assert(false, "Do not call."); }
-  bool do_object_b(oop p) {
-    // It is reachable if it is outside the collection set, or is inside
-    // and forwarded.
-    return !_g1->obj_in_cs(p) || p->is_forwarded();
-  }
-};
-
-class G1KeepAliveClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
-  void do_oop(      oop* p) {
-    oop obj = *p;
-    if (_g1->obj_in_cs(obj)) {
-      assert( obj->is_forwarded(), "invariant" );
-      *p = obj->forwardee();
-    }
-  }
-};
-
 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
 private:
   G1CollectedHeap* _g1;
   DirtyCardQueue *_dcq;
@@ -4184,16 +4228,21 @@
   }
 }
 #endif // ASSERT
 
 void G1ParScanThreadState::trim_queue() {
+  assert(_evac_cl != NULL, "not set");
+  assert(_evac_failure_cl != NULL, "not set");
+  assert(_partial_scan_cl != NULL, "not set");
+
   StarTask ref;
   do {
     // Drain the overflow stack first, so other threads can steal.
     while (refs()->pop_overflow(ref)) {
      deal_with_reference(ref);
    }
+
     while (refs()->pop_local(ref)) {
       deal_with_reference(ref);
     }
   } while (!refs()->is_empty());
 }
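trim_queue() drains the shared overflow area before the private queue so that other workers can steal sooner. A generic sketch of the same loop over a hypothetical queue type (not HotSpot's RefToScanQueue):

    #include <deque>
    #include <functional>

    struct MockTaskQueue {
      std::deque<int> overflow;  // globally visible, stealable work
      std::deque<int> local;     // owner-private work

      bool pop_overflow(int& t) {
        if (overflow.empty()) return false;
        t = overflow.front(); overflow.pop_front(); return true;
      }
      bool pop_local(int& t) {
        if (local.empty()) return false;
        t = local.back(); local.pop_back(); return true;
      }
      bool is_empty() const { return overflow.empty() && local.empty(); }
    };

    // Processing a task may enqueue more tasks, hence the outer do/while.
    void trim_queue_sketch(MockTaskQueue& q, const std::function<void(int)>& deal) {
      int t;
      do {
        while (q.pop_overflow(t)) deal(t);
        while (q.pop_local(t))    deal(t);
      } while (!q.is_empty());
    }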
@@ -4527,38 +4576,45 @@
     _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
 
     ResourceMark rm;
     HandleMark   hm;
 
+    ReferenceProcessor*             rp = _g1h->ref_processor_stw();
+
     G1ParScanThreadState            pss(_g1h, i);
-    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
-    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
-    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
 
     pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
     pss.set_partial_scan_closure(&partial_scan_cl);
 
-    G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
-    G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
-    G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
-    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
-
-    G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
-    G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
-    G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
-
-    OopsInHeapRegionClosure        *scan_root_cl;
-    OopsInHeapRegionClosure        *scan_perm_cl;
-
-    if (_g1h->g1_policy()->during_initial_mark_pause()) {
-      scan_root_cl = &scan_mark_root_cl;
-      scan_perm_cl = &scan_mark_perm_cl;
-    } else {
-      scan_root_cl = &only_scan_root_cl;
-      scan_perm_cl = &only_scan_perm_cl;
-    }
+    G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
+    G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
+
+    G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
+    G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
+
+    OopClosure*                    scan_root_cl = &only_scan_root_cl;
+    OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
+      scan_root_cl = &scan_mark_root_cl;
+      scan_perm_cl = &scan_mark_perm_cl;
+    }
+
+    // The following closure is used to scan RSets looking for reference
+    // fields that point into the collection set. The actual field iteration
+    // is performed by a FilterIntoCSClosure, whose do_oop method calls the
+    // do_oop method of the following closure.
+    // Therefore we want to record the reference processor in the
+    // FilterIntoCSClosure. To do so we record the STW reference
+    // processor into the following closure and pass it to the
+    // FilterIntoCSClosure in HeapRegionDCTOC::walk_mem_region_with_cl.
+    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss, rp);
 
     pss.start_strong_roots();
     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                   SharedHeap::SO_AllClasses,
                                   scan_root_cl,
@@ -4603,10 +4659,11 @@
                         SharedHeap::ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
+
   // First scan the strong roots, including the perm gen.
   double ext_roots_start = os::elapsedTime();
   double closure_app_time_sec = 0.0;
 
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
@@ -4621,16 +4678,17 @@
                        collecting_perm_gen, so,
                        &buf_scan_non_heap_roots,
                        &eager_scan_code_roots,
                        &buf_scan_perm);
 
-  // Now the ref_processor roots.
+  // Now the CM ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
-    // We need to treat the discovered reference lists as roots and
-    // keep entries (which are added by the marking threads) on them
-    // live until they can be processed at the end of marking.
-    ref_processor()->weak_oops_do(&buf_scan_non_heap_roots);
+    // We need to treat the discovered reference lists of the
+    // concurrent mark ref processor as roots and keep entries
+    // (which are added by the marking threads) on them live
+    // until they can be processed at the end of marking.
+    ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
   }
 
   // Finish up any enqueued closure apps (attributed as object copy time).
   buf_scan_non_heap_roots.done();
   buf_scan_perm.done();
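While concurrent marking is in progress, the entries on the CM processor's discovered lists reference objects that an evacuation pause may move, so the hunk above hands those lists to the same buffered closure as the other roots. A schematic of the idea with hypothetical types (the real weak_oops_do takes an OopClosure*):

    struct MockOopClosure {
      virtual void do_oop(void** p) = 0;
      virtual ~MockOopClosure() {}
    };

    struct MockDiscoveredLists {
      void** slots;   // flattened view of all discovered-list entries
      int    count;

      // Treat every discovered-list slot as a root: the closure may
      // rewrite the slot if the referenced object was evacuated.
      void weak_oops_do(MockOopClosure* cl) {
        for (int i = 0; i < count; i++) {
          cl->do_oop(&slots[i]);
        }
      }
    };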
  4667                                        OopClosure* non_root_closure) {
  4725                                        OopClosure* non_root_closure) {
  4668   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  4726   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  4669   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  4727   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  4670 }
  4728 }
  4671 
  4729 
       
  4730 // Weak Reference Processing support
       
  4731 
       
  4732 // An always "is_alive" closure that is used to preserve referents.
       
  4733 // If the object is non-null then it's alive.  Used in the preservation
       
  4734 // of referent objects that are pointed to by reference objects
       
  4735 // discovered by the CM ref processor.
       
  4736 class G1AlwaysAliveClosure: public BoolObjectClosure {
       
  4737   G1CollectedHeap* _g1;
       
  4738 public:
       
  4739   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
       
  4740   void do_object(oop p) { assert(false, "Do not call."); }
       
  4741   bool do_object_b(oop p) {
       
  4742     if (p != NULL) {
       
  4743       return true;
       
  4744     }
       
  4745     return false;
       
  4746   }
       
  4747 };
       
  4748 
       
  4749 bool G1STWIsAliveClosure::do_object_b(oop p) {
       
  4750   // An object is reachable if it is outside the collection set,
       
  4751   // or is inside and copied.
       
  4752   return !_g1->obj_in_cs(p) || p->is_forwarded();
       
  4753 }
       
  4754 
       
  4755 // Non Copying Keep Alive closure
       
  4756 class G1KeepAliveClosure: public OopClosure {
       
  4757   G1CollectedHeap* _g1;
       
  4758 public:
       
  4759   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
       
  4760   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
       
  4761   void do_oop(      oop* p) {
       
  4762     oop obj = *p;
       
  4763 
       
  4764     if (_g1->obj_in_cs(obj)) {
       
  4765       assert( obj->is_forwarded(), "invariant" );
       
  4766       *p = obj->forwardee();
       
  4767     }
       
  4768   }
       
  4769 };
       
  4770 
       
  4771 // Copying Keep Alive closure - can be called from both
       
  4772 // serial and parallel code as long as different worker
       
  4773 // threads utilize different G1ParScanThreadState instances
       
  4774 // and different queues.
       
  4775 
       
  4776 class G1CopyingKeepAliveClosure: public OopClosure {
       
  4777   G1CollectedHeap*         _g1h;
       
  4778   OopClosure*              _copy_non_heap_obj_cl;
       
  4779   OopsInHeapRegionClosure* _copy_perm_obj_cl;
       
  4780   G1ParScanThreadState*    _par_scan_state;
       
  4781 
       
  4782 public:
       
  4783   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
       
  4784                             OopClosure* non_heap_obj_cl,
       
  4785                             OopsInHeapRegionClosure* perm_obj_cl,
       
  4786                             G1ParScanThreadState* pss):
       
  4787     _g1h(g1h),
       
  4788     _copy_non_heap_obj_cl(non_heap_obj_cl),
       
  4789     _copy_perm_obj_cl(perm_obj_cl),
       
  4790     _par_scan_state(pss)
       
  4791   {}
       
  4792 
       
  4793   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
       
  4794   virtual void do_oop(      oop* p) { do_oop_work(p); }
       
  4795 
       
  4796   template <class T> void do_oop_work(T* p) {
       
  4797     oop obj = oopDesc::load_decode_heap_oop(p);
       
  4798 
       
  4799     if (_g1h->obj_in_cs(obj)) {
       
  4800       // If the referent object has been forwarded (either copied
       
  4801       // to a new location or to itself in the event of an
       
  4802       // evacuation failure) then we need to update the reference
       
  4803       // field and, if both reference and referent are in the G1
       
  4804       // heap, update the RSet for the referent.
       
  4805       //
       
  4806       // If the referent has not been forwarded then we have to keep
       
  4807       // it alive by policy. Therefore we have copy the referent.
       
  4808       //
       
  4809       // If the reference field is in the G1 heap then we can push
       
  4810       // on the PSS queue. When the queue is drained (after each
       
  4811       // phase of reference processing) the object and it's followers
       
  4812       // will be copied, the reference field set to point to the
       
  4813       // new location, and the RSet updated. Otherwise we need to
       
  4814       // use the the non-heap or perm closures directly to copy
       
  4815       // the refernt object and update the pointer, while avoiding
       
  4816       // updating the RSet.
       
  4817 
       
  4818       if (_g1h->is_in_g1_reserved(p)) {
       
  4819         _par_scan_state->push_on_queue(p);
       
  4820       } else {
       
  4821         // The reference field is not in the G1 heap.
       
  4822         if (_g1h->perm_gen()->is_in(p)) {
       
  4823           _copy_perm_obj_cl->do_oop(p);
       
  4824         } else {
       
  4825           _copy_non_heap_obj_cl->do_oop(p);
       
  4826         }
       
  4827       }
       
  4828     }
       
  4829   }
       
  4830 };
       
  4831 
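The three-way dispatch above is easiest to see in isolation. A standalone
sketch, where FieldLocation and route() are illustrative stand-ins for the
real queries (is_in_g1_reserved(p) and perm_gen()->is_in(p)):

#include <cstdio>

enum FieldLocation { IN_G1_HEAP, IN_PERM_GEN, OUTSIDE_HEAP };

// Models the choice made for a reference field whose referent is in the
// collection set: only fields inside the G1 heap go through the queue
// (and hence get their RSet entries updated when it is drained).
static const char* route(FieldLocation loc) {
  switch (loc) {
  case IN_G1_HEAP:  return "push on PSS queue: copy + RSet update on drain";
  case IN_PERM_GEN: return "perm copy closure: copy referent, skip RSet";
  default:          return "non-heap copy closure: copy referent, skip RSet";
  }
}

int main() {
  printf("%s\n", route(IN_G1_HEAP));
  printf("%s\n", route(IN_PERM_GEN));
  printf("%s\n", route(OUTSIDE_HEAP));
  return 0;
}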
       
  4832 // Serial drain queue closure. Called as the 'complete_gc'
       
  4833 // closure for each discovered list in some of the
       
  4834 // reference processing phases.
       
  4835 
       
  4836 class G1STWDrainQueueClosure: public VoidClosure {
       
  4837 protected:
       
  4838   G1CollectedHeap* _g1h;
       
  4839   G1ParScanThreadState* _par_scan_state;
       
  4840 
       
  4841   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
       
  4842 
       
  4843 public:
       
  4844   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
       
  4845     _g1h(g1h),
       
  4846     _par_scan_state(pss)
       
  4847   { }
       
  4848 
       
  4849   void do_void() {
       
  4850     G1ParScanThreadState* const pss = par_scan_state();
       
  4851     pss->trim_queue();
       
  4852   }
       
  4853 };
       
  4854 
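Draining the queue computes the transitive closure of everything the
keep-alive closure pushed. A generic worklist, sketched below with
illustrative numbers, captures what pss->trim_queue() does at this level
of abstraction:

#include <cstdio>
#include <vector>

int main() {
  // Each entry stands in for a pushed reference field; popping one may
  // uncover more work, modelled here as a count of child references.
  std::vector<int> queue;
  queue.push_back(2);   // a root whose object holds two more references
  queue.push_back(1);   // a root whose object holds one more reference
  int copied = 0;
  while (!queue.empty()) {   // the trim/drain loop
    int children = queue.back();
    queue.pop_back();
    copied++;                // "copy" the popped object
    for (int c = 0; c < children; c++) {
      queue.push_back(0);    // children with no further references
    }
  }
  printf("objects copied transitively: %d\n", copied);   // prints 5
  return 0;
}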
       
  4855 // Parallel Reference Processing closures
       
  4856 
       
  4857 // Implementation of AbstractRefProcTaskExecutor for parallel reference
       
  4858 // processing during G1 evacuation pauses.
       
  4859 
       
  4860 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
       
  4861 private:
       
  4862   G1CollectedHeap*   _g1h;
       
  4863   RefToScanQueueSet* _queues;
       
  4864   WorkGang*          _workers;
       
  4865   int                _active_workers;
       
  4866 
       
  4867 public:
       
  4868   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,

  4869                            WorkGang* workers,

  4870                            RefToScanQueueSet *task_queues,

  4871                            int n_workers) :
       
  4872     _g1h(g1h),
       
  4873     _queues(task_queues),
       
  4874     _workers(workers),
       
  4875     _active_workers(n_workers)
       
  4876   {
       
  4877     assert(n_workers > 0, "shouldn't call this otherwise");
       
  4878   }
       
  4879 
       
  4880   // Executes the given task using parallel STW worker threads.
       
  4881   virtual void execute(ProcessTask& task);
       
  4882   virtual void execute(EnqueueTask& task);
       
  4883 };
       
  4884 
       
  4885 // Gang task for possibly parallel reference processing
       
  4886 
       
  4887 class G1STWRefProcTaskProxy: public AbstractGangTask {
       
  4888   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
       
  4889   ProcessTask&     _proc_task;
       
  4890   G1CollectedHeap* _g1h;
       
  4891   RefToScanQueueSet *_task_queues;
       
  4892   ParallelTaskTerminator* _terminator;
       
  4893 
       
  4894 public:
       
  4895   G1STWRefProcTaskProxy(ProcessTask& proc_task,

  4896                         G1CollectedHeap* g1h,

  4897                         RefToScanQueueSet *task_queues,

  4898                         ParallelTaskTerminator* terminator) :
       
  4899     AbstractGangTask("Process reference objects in parallel"),
       
  4900     _proc_task(proc_task),
       
  4901     _g1h(g1h),
       
  4902     _task_queues(task_queues),
       
  4903     _terminator(terminator)
       
  4904   {}
       
  4905 
       
  4906   virtual void work(int i) {
       
  4907     // The reference processing task executed by a single worker.
       
  4908     ResourceMark rm;
       
  4909     HandleMark   hm;
       
  4910 
       
  4911     G1STWIsAliveClosure is_alive(_g1h);
       
  4912 
       
  4913     G1ParScanThreadState pss(_g1h, i);
       
  4914 
       
  4915     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
       
  4916     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
       
  4917     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
       
  4918 
       
  4919     pss.set_evac_closure(&scan_evac_cl);
       
  4920     pss.set_evac_failure_closure(&evac_failure_cl);
       
  4921     pss.set_partial_scan_closure(&partial_scan_cl);
       
  4922 
       
  4923     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
       
  4924     G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
       
  4925 
       
  4926     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
       
  4927     G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
       
  4928 
       
  4929     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
       
  4930     OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
       
  4931 
       
  4932     if (_g1h->g1_policy()->during_initial_mark_pause()) {
       
  4933       // We also need to mark copied objects.
       
  4934       copy_non_heap_cl = &copy_mark_non_heap_cl;
       
  4935       copy_perm_cl = &copy_mark_perm_cl;
       
  4936     }
       
  4937 
       
  4938     // Keep alive closure.
       
  4939     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
       
  4940 
       
  4941     // Complete GC closure
       
  4942     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
       
  4943 
       
  4944     // Call the reference processing task's work routine.
       
  4945     _proc_task.work(i, is_alive, keep_alive, drain_queue);
       
  4946 
       
  4947     // Note we cannot assert that the refs array is empty here as not all
       
  4948     // of the processing tasks (specifically phase2 - pp2_work) execute
       
  4949     // the complete_gc closure (which ordinarily would drain the queue) so
       
  4950     // the queue may not be empty.
       
  4951   }
       
  4952 };
       
  4953 
       
  4954 // Driver routine for parallel reference processing.
       
  4955 // Creates an instance of the ref processing gang
       
  4956 // task and has the worker threads execute it.
       
  4957 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
       
  4958   assert(_workers != NULL, "Need parallel worker threads.");
       
  4959 
       
  4960   ParallelTaskTerminator terminator(_active_workers, _queues);
       
  4961   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
       
  4962 
       
  4963   _g1h->set_par_threads(_active_workers);
       
  4964   _workers->run_task(&proc_task_proxy);
       
  4965   _g1h->set_par_threads(0);
       
  4966 }
       
  4967 
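The driver above follows a wrap-and-run pattern: a per-worker task is
wrapped in a gang task so a fixed pool of threads can execute it. A
standalone model using std::thread in place of the WorkGang; ProcessTask
and TaskProxy below are illustrative, not the HotSpot classes:

#include <cstdio>
#include <thread>
#include <vector>

struct ProcessTask {   // stands in for AbstractRefProcTaskExecutor::ProcessTask
  void work(int worker_id) {
    printf("worker %d processes its share of the discovered lists\n", worker_id);
  }
};

struct TaskProxy {     // stands in for G1STWRefProcTaskProxy
  ProcessTask& _task;
  explicit TaskProxy(ProcessTask& t) : _task(t) {}
  void work(int i) { _task.work(i); }
};

int main() {
  ProcessTask task;
  TaskProxy proxy(task);
  const int active_workers = 4;   // assumed gang size
  std::vector<std::thread> gang;
  for (int i = 0; i < active_workers; i++) {
    gang.push_back(std::thread([&proxy, i] { proxy.work(i); }));
  }
  for (size_t t = 0; t < gang.size(); t++) {
    gang[t].join();
  }
  return 0;
}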
       
  4968 // Gang task for parallel reference enqueueing.
       
  4969 
       
  4970 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
       
  4971   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
       
  4972   EnqueueTask& _enq_task;
       
  4973 
       
  4974 public:
       
  4975   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
       
  4976     AbstractGangTask("Enqueue reference objects in parallel"),
       
  4977     _enq_task(enq_task)
       
  4978   { }
       
  4979 
       
  4980   virtual void work(int i) {
       
  4981     _enq_task.work(i);
       
  4982   }
       
  4983 };
       
  4984 
       
  4985   // Driver routine for parallel reference enqueueing.
       
  4986 // Creates an instance of the ref enqueueing gang
       
  4987 // task and has the worker threads execute it.
       
  4988 
       
  4989 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
       
  4990   assert(_workers != NULL, "Need parallel worker threads.");
       
  4991 
       
  4992   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
       
  4993 
       
  4994   _g1h->set_par_threads(_active_workers);
       
  4995   _workers->run_task(&enq_task_proxy);
       
  4996   _g1h->set_par_threads(0);
       
  4997 }
       
  4998 
       
  4999 // End of weak reference support closures
       
  5000 
       
  5001 // Abstract task used to preserve (i.e. copy) any referent objects
       
  5002 // that are in the collection set and are pointed to by reference
       
  5003 // objects discovered by the CM ref processor.
       
  5004 
       
  5005 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
       
  5006 protected:
       
  5007   G1CollectedHeap* _g1h;
       
  5008   RefToScanQueueSet      *_queues;
       
  5009   ParallelTaskTerminator _terminator;
       
  5010   int _n_workers;
       
  5011 
       
  5012 public:
       
  5013   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
       
  5014     AbstractGangTask("ParPreserveCMReferents"),
       
  5015     _g1h(g1h),
       
  5016     _queues(task_queues),
       
  5017     _terminator(workers, _queues),
       
  5018     _n_workers(workers)
       
  5019   { }
       
  5020 
       
  5021   void work(int i) {
       
  5022     ResourceMark rm;
       
  5023     HandleMark   hm;
       
  5024 
       
  5025     G1ParScanThreadState            pss(_g1h, i);
       
  5026     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
       
  5027     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
       
  5028     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
       
  5029 
       
  5030     pss.set_evac_closure(&scan_evac_cl);
       
  5031     pss.set_evac_failure_closure(&evac_failure_cl);
       
  5032     pss.set_partial_scan_closure(&partial_scan_cl);
       
  5033 
       
  5034     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
       
  5035 
       
  5036 
       
  5037     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
       
  5038     G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
       
  5039 
       
  5040     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
       
  5041     G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
       
  5042 
       
  5043     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
       
  5044     OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
       
  5045 
       
  5046     if (_g1h->g1_policy()->during_initial_mark_pause()) {
       
  5047       // We also need to mark copied objects.
       
  5048       copy_non_heap_cl = &copy_mark_non_heap_cl;
       
  5049       copy_perm_cl = &copy_mark_perm_cl;
       
  5050     }
       
  5051 
       
  5052     // Is alive closure
       
  5053     G1AlwaysAliveClosure always_alive(_g1h);
       
  5054 
       
  5055     // Copying keep alive closure. Applied to referent objects that need
       
  5056     // to be copied.
       
  5057     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
       
  5058 
       
  5059     ReferenceProcessor* rp = _g1h->ref_processor_cm();
       
  5060 
       
  5061     int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
       
  5062     int stride = MIN2(MAX2(_n_workers, 1), limit);
       
  5063 
       
  5064     // limit is set using max_num_q() - which was set using ParallelGCThreads.
       
  5065     // So this must be true - but assert just in case someone decides to
       
  5066     // change the worker ids.
       
  5067     assert(0 <= i && i < limit, "sanity");
       
  5068     assert(!rp->discovery_is_atomic(), "check this code");
       
  5069 
       
  5070     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
       
  5071     for (int idx = i; idx < limit; idx += stride) {
       
  5072       DiscoveredList& ref_list = rp->discovered_soft_refs()[idx];
       
  5073 
       
  5074       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
       
  5075       while (iter.has_next()) {
       
  5076         // Since discovery is not atomic for the CM ref processor, we
       
  5077         // can see some null referent objects.
       
  5078         iter.load_ptrs(DEBUG_ONLY(true));
       
  5079         oop ref = iter.obj();
       
  5080 
       
  5081         // This will filter nulls.
       
  5082         if (iter.is_referent_alive()) {
       
  5083           iter.make_referent_alive();
       
  5084         }
       
  5085         iter.move_to_next();
       
  5086       }
       
  5087     }
       
  5088 
       
  5089     // Drain the queue - which may cause stealing
       
  5090     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
       
  5091     drain_queue.do_void();
       
  5092     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
       
  5093     assert(pss.refs()->is_empty(), "should be");
       
  5094   }
       
  5095 };
       
  5096 
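The stride walk partitions the discovered lists among workers without any
synchronization. A standalone sketch with concrete, illustrative numbers:
four Reference subclasses and an assumed max_num_q() of 4 give 16 lists:

#include <algorithm>
#include <cstdio>

int main() {
  const int n_workers = 4;                                         // assumed
  const int limit     = 4 * 4;                                     // subclasses * max_num_q()
  const int stride    = std::min(std::max(n_workers, 1), limit);   // MIN2(MAX2(..))
  for (int i = 0; i < n_workers; i++) {
    printf("worker %d claims lists:", i);
    // Mirrors the loop above: indices [i, i+stride, i+2*stride, ...) < limit,
    // so every list is claimed by exactly one worker.
    for (int idx = i; idx < limit; idx += stride) {
      printf(" %d", idx);
    }
    printf("\n");
  }
  return 0;
}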
       
  5097 // Weak Reference processing during an evacuation pause (part 1).
       
  5098 void G1CollectedHeap::process_discovered_references() {
       
  5099   double ref_proc_start = os::elapsedTime();
       
  5100 
       
  5101   ReferenceProcessor* rp = _ref_processor_stw;
       
  5102   assert(rp->discovery_enabled(), "should have been enabled");
       
  5103 
       
  5104   // Any reference objects, in the collection set, that were 'discovered'
       
  5105   // by the CM ref processor should have already been copied (either by
       
  5106   // applying the external root copy closure to the discovered lists, or
       
  5107   // by following an RSet entry).
       
  5108   //
       
  5109   // But some of the referents that these reference objects point to,

  5110   // and that are in the collection set, may not have been copied: the

  5111   // STW ref processor would have seen that the reference object had

  5112   // already been 'discovered' and would have skipped discovering the

  5113   // reference, but would not have treated the reference object as a

  5114   // regular oop. As a result the copy closure would not have been

  5115   // applied to the referent object.
       
  5116   //
       
  5117   // We need to explicitly copy these referent objects - the references
       
  5118   // will be processed at the end of remarking.
       
  5119   //
       
  5120   // We also need to do this copying before we process the reference
       
  5121   // objects discovered by the STW ref processor in case one of these
       
  5122   // referents points to another object which is also referenced by an
       
  5123   // object discovered by the STW ref processor.
       
  5124 
       
  5125   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
       
  5126                         workers()->total_workers() : 1);
       
  5127 
       
  5128   set_par_threads(n_workers);
       
  5129   G1ParPreserveCMReferentsTask keep_cm_referents(this, n_workers, _task_queues);
       
  5130 
       
  5131   if (G1CollectedHeap::use_parallel_gc_threads()) {
       
  5132     workers()->run_task(&keep_cm_referents);
       
  5133   } else {
       
  5134     keep_cm_referents.work(0);
       
  5135   }
       
  5136 
       
  5137   set_par_threads(0);
       
  5138 
       
  5139   // Closure to test whether a referent is alive.
       
  5140   G1STWIsAliveClosure is_alive(this);
       
  5141 
       
  5142   // Even when parallel reference processing is enabled, the processing
       
  5143   // of JNI refs is performed serially by the current thread
       
  5144   // rather than by a worker. The following PSS will be used for processing
       
  5145   // JNI refs.
       
  5146 
       
  5147   // Use only a single queue for this PSS.
       
  5148   G1ParScanThreadState pss(this, 0);
       
  5149 
       
  5150   // We do not embed a reference processor in the copying/scanning
       
  5151   // closures while we're actually processing the discovered
       
  5152   // reference objects.
       
  5153   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
       
  5154   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
       
  5155   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
       
  5156 
       
  5157   pss.set_evac_closure(&scan_evac_cl);
       
  5158   pss.set_evac_failure_closure(&evac_failure_cl);
       
  5159   pss.set_partial_scan_closure(&partial_scan_cl);
       
  5160 
       
  5161   assert(pss.refs()->is_empty(), "pre-condition");
       
  5162 
       
  5163   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
       
  5164   G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
       
  5165 
       
  5166   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
       
  5167   G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
       
  5168 
       
  5169   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
       
  5170   OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
       
  5171 
       
  5172   if (g1_policy()->during_initial_mark_pause()) {
       
  5173     // We also need to mark copied objects.
       
  5174     copy_non_heap_cl = &copy_mark_non_heap_cl;
       
  5175     copy_perm_cl = &copy_mark_perm_cl;
       
  5176   }
       
  5177 
       
  5178   // Keep alive closure.
       
  5179   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
       
  5180 
       
  5181   // Serial Complete GC closure
       
  5182   G1STWDrainQueueClosure drain_queue(this, &pss);
       
  5183 
       
  5184   // Set up the soft refs policy...
       
  5185   rp->setup_policy(false);
       
  5186 
       
  5187   if (!rp->processing_is_mt()) {
       
  5188     // Serial reference processing...
       
  5189     rp->process_discovered_references(&is_alive,
       
  5190                                       &keep_alive,
       
  5191                                       &drain_queue,
       
  5192                                       NULL);
       
  5193   } else {
       
  5194     // Parallel reference processing
       
  5195     int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
       
  5196     assert(rp->num_q() == active_workers, "sanity");
       
  5197     assert(active_workers <= rp->max_num_q(), "sanity");
       
  5198 
       
  5199     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
       
  5200     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
       
  5201   }
       
  5202 
       
  5203   // We have completed copying any necessary live referent objects
       
  5204   // (that were not copied during the actual pause) so we can
       
  5205   // retire any active alloc buffers
       
  5206   pss.retire_alloc_buffers();
       
  5207   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
       
  5208 
       
  5209   double ref_proc_time = os::elapsedTime() - ref_proc_start;
       
  5210   g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0);
       
  5211 }
       
  5212 
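The serial/MT split above turns on whether an executor is supplied: a NULL
executor selects the serial path inside the reference processor. A minimal
model; TaskExecutor and process_refs are illustrative names only:

#include <cstddef>
#include <cstdio>

struct TaskExecutor { int workers; };

// Models rp->process_discovered_references(...): NULL means the calling
// thread walks every discovered list itself; otherwise the gang is used.
static void process_refs(const TaskExecutor* executor) {
  if (executor == NULL) {
    printf("serial: the current thread processes all discovered lists\n");
  } else {
    printf("parallel: %d workers share the discovered lists\n", executor->workers);
  }
}

int main() {
  process_refs(NULL);           // mirrors the !rp->processing_is_mt() branch
  TaskExecutor gang = { 8 };    // assumed active worker count
  process_refs(&gang);
  return 0;
}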
       
  5213 // Weak Reference processing during an evacuation pause (part 2).
       
  5214 void G1CollectedHeap::enqueue_discovered_references() {
       
  5215   double ref_enq_start = os::elapsedTime();
       
  5216 
       
  5217   ReferenceProcessor* rp = _ref_processor_stw;
       
  5218   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
       
  5219 
       
  5220   // Now enqueue any references remaining on the discovered lists onto
       
  5221   // the pending list.
       
  5222   if (!rp->processing_is_mt()) {
       
  5223     // Serial reference processing...
       
  5224     rp->enqueue_discovered_references();
       
  5225   } else {
       
  5226     // Parallel reference enqueueing
       
  5227 
       
  5228     int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
       
  5229     assert(rp->num_q() == active_workers, "sanity");
       
  5230     assert(active_workers <= rp->max_num_q(), "sanity");
       
  5231 
       
  5232     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
       
  5233     rp->enqueue_discovered_references(&par_task_executor);
       
  5234   }
       
  5235 
       
  5236   rp->verify_no_references_recorded();
       
  5237   assert(!rp->discovery_enabled(), "should have been disabled");
       
  5238 
       
  5239   // FIXME
       
  5240   // CM's reference processing also cleans up the string and symbol tables.
       
  5241   // Should we do that here also? We could, but it is a serial operation
       
  5242   // and could significantly increase the pause time.
       
  5243 
       
  5244   double ref_enq_time = os::elapsedTime() - ref_enq_start;
       
  5245   g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0);
       
  5246 }
       
  5247 
  4672 void G1CollectedHeap::evacuate_collection_set() {
  5248 void G1CollectedHeap::evacuate_collection_set() {
  4673   set_evacuation_failed(false);
  5249   set_evacuation_failed(false);
  4674 
  5250 
  4675   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  5251   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  4676   concurrent_g1_refine()->set_use_cache(false);
  5252   concurrent_g1_refine()->set_use_cache(false);
  4684 
  5260 
  4685   rem_set()->prepare_for_younger_refs_iterate(true);
  5261   rem_set()->prepare_for_younger_refs_iterate(true);
  4686 
  5262 
  4687   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  5263   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  4688   double start_par = os::elapsedTime();
  5264   double start_par = os::elapsedTime();
       
  5265 
  4689   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5266   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4690     // The individual threads will set their evac-failure closures.
  5267     // The individual threads will set their evac-failure closures.
  4691     StrongRootsScope srs(this);
  5268     StrongRootsScope srs(this);
  4692     if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
  5269     if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
  4693     workers()->run_task(&g1_par_task);
  5270     workers()->run_task(&g1_par_task);
  4698 
  5275 
  4699   double par_time = (os::elapsedTime() - start_par) * 1000.0;
  5276   double par_time = (os::elapsedTime() - start_par) * 1000.0;
  4700   g1_policy()->record_par_time(par_time);
  5277   g1_policy()->record_par_time(par_time);
  4701   set_par_threads(0);
  5278   set_par_threads(0);
  4702 
  5279 
       
  5280   // Process any discovered reference objects - we have
       
  5281   // to do this _before_ we retire the GC alloc regions
       
  5282   // as we may have to copy some 'reachable' referent
       
  5283   // objects (and their reachable sub-graphs) that were
       
  5284   // not copied during the pause.
       
  5285   process_discovered_references();
       
  5286 
  4703   // Weak root processing.
  5287   // Weak root processing.
  4704   // Note: when JSR 292 is enabled and code blobs can contain
  5288   // Note: when JSR 292 is enabled and code blobs can contain
  4705   // non-perm oops then we will need to process the code blobs
  5289   // non-perm oops then we will need to process the code blobs
  4706   // here too.
  5290   // here too.
  4707   {
  5291   {
  4708     G1IsAliveClosure is_alive(this);
  5292     G1STWIsAliveClosure is_alive(this);
  4709     G1KeepAliveClosure keep_alive(this);
  5293     G1KeepAliveClosure keep_alive(this);
  4710     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  5294     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  4711   }
  5295   }
       
  5296 
  4712   release_gc_alloc_regions();
  5297   release_gc_alloc_regions();
  4713   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  5298   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  4714 
  5299 
  4715   concurrent_g1_refine()->clear_hot_cache();
  5300   concurrent_g1_refine()->clear_hot_cache();
  4716   concurrent_g1_refine()->set_use_cache(true);
  5301   concurrent_g1_refine()->set_use_cache(true);
  4727       gclog_or_tty->print(" (to-space overflow)");
  5312       gclog_or_tty->print(" (to-space overflow)");
  4728     } else if (PrintGC) {
  5313     } else if (PrintGC) {
  4729       gclog_or_tty->print("--");
  5314       gclog_or_tty->print("--");
  4730     }
  5315     }
  4731   }
  5316   }
       
  5317 
       
  5318   // Enqueue any references remaining on the STW
       
  5319   // reference processor's discovered lists. We need to do
       
  5320   // this after the card table is cleaned (and verified) as
       
  5321   // the act of enqueueing entries onto the pending list
       
  5322   // will log these updates (and dirty their associated
       
  5323   // cards). We need these updates logged to update any
       
  5324   // RSets.
       
  5325   enqueue_discovered_references();
  4732 
  5326 
  4733   if (G1DeferredRSUpdate) {
  5327   if (G1DeferredRSUpdate) {
  4734     RedirtyLoggedCardTableEntryFastClosure redirty;
  5328     RedirtyLoggedCardTableEntryFastClosure redirty;
  4735     dirty_card_queue_set().set_closure(&redirty);
  5329     dirty_card_queue_set().set_closure(&redirty);
  4736     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  5330     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  4928       r->set_next_dirty_cards_region(NULL);
  5522       r->set_next_dirty_cards_region(NULL);
  4929     }
  5523     }
  4930   }
  5524   }
  4931 
  5525 
  4932   double elapsed = os::elapsedTime() - start;
  5526   double elapsed = os::elapsedTime() - start;
  4933   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
  5527   g1_policy()->record_clear_ct_time(elapsed * 1000.0);
  4934 #ifndef PRODUCT
  5528 #ifndef PRODUCT
  4935   if (G1VerifyCTCleanup || VerifyAfterGC) {
  5529   if (G1VerifyCTCleanup || VerifyAfterGC) {
  4936     G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
  5530     G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
  4937     heap_region_iterate(&cleanup_verifier);
  5531     heap_region_iterate(&cleanup_verifier);
  4938   }
  5532   }