Old version:

2065 }
2066
2067 void G1CollectedHeap::ref_processing_init() {
2068   // Reference processing in G1 currently works as follows:
2069   //
2070   // * There is only one reference processor instance that
2071   //   'spans' the entire heap. It is created by the code
2072   //   below.
2073   // * Reference discovery is not enabled during an incremental
2074   //   pause (see 6484982).
2075   // * Discovered refs are not enqueued nor are they processed
2076   //   during an incremental pause (see 6484982).
2077   // * Reference discovery is enabled at initial marking.
2078   // * Reference discovery is disabled and the discovered
2079   //   references processed etc during remarking.
2080   // * Reference discovery is MT (see below).
2081   // * Reference discovery requires a barrier (see below).
2082   // * Reference processing is currently not MT (see 6608385).
2083   // * A full GC enables (non-MT) reference discovery and
2084   //   processes any discovered references.
2085
2086   SharedHeap::ref_processing_init();
2087   MemRegion mr = reserved_region();
2088   _ref_processor =
2089     new ReferenceProcessor(mr,    // span
2090                            ParallelRefProcEnabled && (ParallelGCThreads > 1),    // mt processing
2091                            (int) ParallelGCThreads,   // degree of mt processing
2092                            ParallelGCThreads > 1 || ConcGCThreads > 1,  // mt discovery
2093                            (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
2094                            false,                     // Reference discovery is not atomic
2095                            &_is_alive_closure,        // is alive closure for efficiency
2096                            true);                     // Setting next fields of discovered
2097                                                       // lists requires a barrier.
2098 }
2099

New version:

2075 }
2076
2077 void G1CollectedHeap::ref_processing_init() {
2078   // Reference processing in G1 currently works as follows:
2079   //
2080   // * There are two reference processor instances. One is
2081   //   used to record and process discovered references
2082   //   during concurrent marking; the other is used to
2083   //   record and process references during STW pauses
2084   //   (both full and incremental).
2085   // * Both ref processors need to 'span' the entire heap as
2086   //   the regions in the collection set may be dotted around.
2087   //
2088   // * For the concurrent marking ref processor:
2089   //   * Reference discovery is enabled at initial marking.
2090   //   * Reference discovery is disabled and the discovered
2091   //     references processed etc during remarking.
2092   //   * Reference discovery is MT (see below).
2093   //   * Reference discovery requires a barrier (see below).
2094   //   * Reference processing may or may not be MT
2095   //     (depending on the value of ParallelRefProcEnabled
2096   //     and ParallelGCThreads).
2097   //   * A full GC disables reference discovery by the CM
2098   //     ref processor and abandons any entries on its
2099   //     discovered lists.
2100   //
2101   // * For the STW processor:
2102   //   * Non MT discovery is enabled at the start of a full GC.
2103   //   * Processing and enqueueing during a full GC is non-MT.
2104   //   * During a full GC, references are processed after marking.
2105   //
2106   //   * Discovery (may or may not be MT) is enabled at the start
2107   //     of an incremental evacuation pause.
2108   //   * References are processed near the end of a STW evacuation pause.
2109   //   * For both types of GC:
2110   //     * Discovery is atomic - i.e. not concurrent.
2111   //     * Reference discovery will not need a barrier.
2112
2113   SharedHeap::ref_processing_init();
2114   MemRegion mr = reserved_region();
2115
2116   // Concurrent Mark ref processor
2117   _ref_processor_cm =
2118     new ReferenceProcessor(mr,    // span
2119                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2120                                 // mt processing
2121                            (int) ParallelGCThreads,
2122                                 // degree of mt processing
2123                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2124                                 // mt discovery
2125                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
2126                                 // degree of mt discovery
2127                            false,
2128                                 // Reference discovery is not atomic
2129                            &_is_alive_closure_cm,
2130                                 // is alive closure
2131                                 // (for efficiency/performance)
2132                            true);
2133                                 // Setting next fields of discovered
2134                                 // lists requires a barrier.
2135
2136   // STW ref processor
2137   _ref_processor_stw =
2138     new ReferenceProcessor(mr,    // span
2139                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2140                                 // mt processing
2141                            MAX2((int)ParallelGCThreads, 1),
2142                                 // degree of mt processing
2143                            (ParallelGCThreads > 1),
2144                                 // mt discovery
2145                            MAX2((int)ParallelGCThreads, 1),
2146                                 // degree of mt discovery
2147                            true,
2148                                 // Reference discovery is atomic
2149                            &_is_alive_closure_stw,
2150                                 // is alive closure
2151                                 // (for efficiency/performance)
2152                            false);
2153                                 // Setting next fields of discovered
2154                                 // lists does not require a barrier.
2155 }
2156
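To see what the two constructors above actually request, the following standalone sketch (plain C++, not HotSpot code) evaluates the same expressions for one assumed flag configuration; the flag values and the use of std::max in place of MAX2 are illustrative assumptions only.

  #include <algorithm>
  #include <cstdio>

  int main() {
    // Assumed example values for the JVM flags referenced above.
    bool     ParallelRefProcEnabled = false;
    unsigned ParallelGCThreads      = 4;
    unsigned ConcGCThreads          = 1;

    // Arguments computed for the concurrent marking ref processor.
    bool cm_mt_processing  = ParallelRefProcEnabled && (ParallelGCThreads > 1);
    int  cm_proc_degree    = (int) ParallelGCThreads;
    bool cm_mt_discovery   = (ParallelGCThreads > 1) || (ConcGCThreads > 1);
    int  cm_disc_degree    = (int) std::max(ParallelGCThreads, ConcGCThreads);

    // Arguments computed for the STW ref processor.
    bool stw_mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
    int  stw_proc_degree   = std::max((int) ParallelGCThreads, 1);
    bool stw_mt_discovery  = (ParallelGCThreads > 1);
    int  stw_disc_degree   = std::max((int) ParallelGCThreads, 1);

    std::printf("CM : mt_proc=%d deg=%d  mt_disc=%d deg=%d\n",
                cm_mt_processing, cm_proc_degree, cm_mt_discovery, cm_disc_degree);
    std::printf("STW: mt_proc=%d deg=%d  mt_disc=%d deg=%d\n",
                stw_mt_processing, stw_proc_degree, stw_mt_discovery, stw_disc_degree);
    return 0;
  }

With these assumed values both processors end up with multi-threaded discovery of degree 4 but single-threaded processing, matching the comment that processing is MT only when ParallelRefProcEnabled is set together with more than one ParallelGCThread.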
2100 size_t G1CollectedHeap::capacity() const { |
2157 size_t G1CollectedHeap::capacity() const { |
2101 return _g1_committed.byte_size(); |
2158 return _g1_committed.byte_size(); |
2102 } |
2159 } |
Old version:

3352
3353   }
3354
3355   COMPILER2_PRESENT(DerivedPointerTable::clear());
3356
3357   // Please see comment in G1CollectedHeap::ref_processing_init()
3358   // to see how reference processing currently works in G1.
3359   //
3360   // We want to turn off ref discovery, if necessary, and turn it back
3361   // on again later if we do. XXX Dubious: why is discovery disabled?
3362   bool was_enabled = ref_processor()->discovery_enabled();
3363   if (was_enabled) ref_processor()->disable_discovery();
3364
3365   // Forget the current alloc region (we might even choose it to be part
3366   // of the collection set!).
3367   release_mutator_alloc_region();
3368
3369   // We should call this after we retire the mutator alloc
3370   // region(s) so that all the ALLOC / RETIRE events are generated
3371   // before the start GC event.
3372   _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3373
3374   // The elapsed time induced by the start time below deliberately elides
3375   // the possible verification above.
3376   double start_time_sec = os::elapsedTime();
3377   size_t start_used_bytes = used();

New version:

3413
3414   }
3415
3416   COMPILER2_PRESENT(DerivedPointerTable::clear());
3417
3418   // Please see comment in g1CollectedHeap.hpp and
3419   // G1CollectedHeap::ref_processing_init() to see how
3420   // reference processing currently works in G1.
3421
3422   // Enable discovery in the STW reference processor
3423   ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3424                                         true /*verify_no_refs*/);
3425
3426   {
3427     // We want to temporarily turn off discovery by the
3428     // CM ref processor, if necessary, and turn it back
3429     // on again later if we do. Using a scoped
3430     // NoRefDiscovery object will do this.
3431     NoRefDiscovery no_cm_discovery(ref_processor_cm());
3432
3433     // Forget the current alloc region (we might even choose it to be part
3434     // of the collection set!).
3435     release_mutator_alloc_region();
3436
3437     // We should call this after we retire the mutator alloc
3438     // region(s) so that all the ALLOC / RETIRE events are generated
3439     // before the start GC event.
3440     _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3441
3442     // The elapsed time induced by the start time below deliberately elides
3443     // the possible verification above.
3444     double start_time_sec = os::elapsedTime();
3445     size_t start_used_bytes = used();
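The scoped NoRefDiscovery object introduced above turns CM reference discovery off for the duration of the enclosing block and restores the previous state when the block is exited, which is why the explicit was_enabled bookkeeping of the old version is no longer needed. A minimal standalone sketch of that RAII idiom, using made-up toy types rather than the real NoRefDiscovery/ReferenceProcessor classes:

  #include <cstdio>

  struct ToyRefProcessor {               // stand-in for ReferenceProcessor
    bool discovery_enabled = true;
  };

  class ScopedNoDiscovery {              // stand-in for NoRefDiscovery
    ToyRefProcessor* _rp;
    bool             _was_enabled;
   public:
    explicit ScopedNoDiscovery(ToyRefProcessor* rp)
        : _rp(rp), _was_enabled(rp->discovery_enabled) {
      if (_was_enabled) _rp->discovery_enabled = false;   // disable on entry
    }
    ~ScopedNoDiscovery() {
      if (_was_enabled) _rp->discovery_enabled = true;    // restore on exit
    }
  };

  int main() {
    ToyRefProcessor cm_rp;
    {
      ScopedNoDiscovery no_discovery(&cm_rp);
      std::printf("inside scope: %d\n", cm_rp.discovery_enabled);  // prints 0
    }
    std::printf("after scope:  %d\n", cm_rp.discovery_enabled);    // prints 1
    return 0;
  }

Because the restore happens in the destructor, discovery is re-enabled on every exit path from the block.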
3378 |
3446 |
3379 #if YOUNG_LIST_VERBOSE |
3447 #if YOUNG_LIST_VERBOSE |
3380 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
3448 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
3381 _young_list->print(); |
3449 _young_list->print(); |
3382 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3450 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3383 #endif // YOUNG_LIST_VERBOSE |
3451 #endif // YOUNG_LIST_VERBOSE |
3384 |
3452 |
3385 g1_policy()->record_collection_pause_start(start_time_sec, |
3453 g1_policy()->record_collection_pause_start(start_time_sec, |
3386 start_used_bytes); |
3454 start_used_bytes); |
3387 |
3455 |
3388 #if YOUNG_LIST_VERBOSE |
3456 #if YOUNG_LIST_VERBOSE |
3389 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
3457 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
3390 _young_list->print(); |
3458 _young_list->print(); |
3391 #endif // YOUNG_LIST_VERBOSE |
3459 #endif // YOUNG_LIST_VERBOSE |
3392 |
3460 |
3393 if (g1_policy()->during_initial_mark_pause()) { |
3461 if (g1_policy()->during_initial_mark_pause()) { |
3394 concurrent_mark()->checkpointRootsInitialPre(); |
3462 concurrent_mark()->checkpointRootsInitialPre(); |
3395 } |
3463 } |
3396 perm_gen()->save_marks(); |
3464 perm_gen()->save_marks(); |
3397 |
3465 |
3398 // We must do this before any possible evacuation that should propagate |
3466 // We must do this before any possible evacuation that should propagate |
3399 // marks. |
3467 // marks. |
3400 if (mark_in_progress()) { |
3468 if (mark_in_progress()) { |
3401 double start_time_sec = os::elapsedTime(); |
3469 double start_time_sec = os::elapsedTime(); |
3402 |
3470 |
3403 _cm->drainAllSATBBuffers(); |
3471 _cm->drainAllSATBBuffers(); |
3404 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
3472 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
3405 g1_policy()->record_satb_drain_time(finish_mark_ms); |
3473 g1_policy()->record_satb_drain_time(finish_mark_ms); |
3406 } |
3474 } |
3407 // Record the number of elements currently on the mark stack, so we |
3475 // Record the number of elements currently on the mark stack, so we |
3408 // only iterate over these. (Since evacuation may add to the mark |
3476 // only iterate over these. (Since evacuation may add to the mark |
3409 // stack, doing more exposes race conditions.) If no mark is in |
3477 // stack, doing more exposes race conditions.) If no mark is in |
3410 // progress, this will be zero. |
3478 // progress, this will be zero. |
3411 _cm->set_oops_do_bound(); |
3479 _cm->set_oops_do_bound(); |
3412 |
3480 |
3413 if (mark_in_progress()) { |
3481 if (mark_in_progress()) { |
3414 concurrent_mark()->newCSet(); |
3482 concurrent_mark()->newCSet(); |
3415 } |
3483 } |
3416 |
3484 |
3417 #if YOUNG_LIST_VERBOSE |
3485 #if YOUNG_LIST_VERBOSE |
3418 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
3486 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
3419 _young_list->print(); |
3487 _young_list->print(); |
3420 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3488 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
3421 #endif // YOUNG_LIST_VERBOSE |
3489 #endif // YOUNG_LIST_VERBOSE |
3422 |
3490 |
3423 g1_policy()->choose_collection_set(target_pause_time_ms); |
3491 g1_policy()->choose_collection_set(target_pause_time_ms); |
3424 |
3492 |
3425 if (_hr_printer.is_active()) { |
3493 if (_hr_printer.is_active()) { |
3426 HeapRegion* hr = g1_policy()->collection_set(); |
3494 HeapRegion* hr = g1_policy()->collection_set(); |
3427 while (hr != NULL) { |
3495 while (hr != NULL) { |
3428 G1HRPrinter::RegionType type; |
3496 G1HRPrinter::RegionType type; |
3429 if (!hr->is_young()) { |
3497 if (!hr->is_young()) { |
3430 type = G1HRPrinter::Old; |
3498 type = G1HRPrinter::Old; |
3431 } else if (hr->is_survivor()) { |
3499 } else if (hr->is_survivor()) { |
3432 type = G1HRPrinter::Survivor; |
3500 type = G1HRPrinter::Survivor; |
3433 } else { |
3501 } else { |
3434 type = G1HRPrinter::Eden; |
3502 type = G1HRPrinter::Eden; |
3435 } |
3503 } |
3436 _hr_printer.cset(hr); |
3504 _hr_printer.cset(hr); |
3437 hr = hr->next_in_collection_set(); |
3505 hr = hr->next_in_collection_set(); |
3438 } |
|
3439 } |
|
3440 |
|
3441 // We have chosen the complete collection set. If marking is |
|
3442 // active then, we clear the region fields of any of the |
|
3443 // concurrent marking tasks whose region fields point into |
|
3444 // the collection set as these values will become stale. This |
|
3445 // will cause the owning marking threads to claim a new region |
|
3446 // when marking restarts. |
|
3447 if (mark_in_progress()) { |
|
3448 concurrent_mark()->reset_active_task_region_fields_in_cset(); |
|
3449 } |
|
3450 |
|
3451 #ifdef ASSERT |
|
3452 VerifyCSetClosure cl; |
|
3453 collection_set_iterate(&cl); |
|
3454 #endif // ASSERT |
|
3455 |
|
3456 setup_surviving_young_words(); |
|
3457 |
|
3458 // Initialize the GC alloc regions. |
|
3459 init_gc_alloc_regions(); |
|
3460 |
|
3461 // Actually do the work... |
|
3462 evacuate_collection_set(); |
|
3463 |
|
3464 free_collection_set(g1_policy()->collection_set()); |
|
3465 g1_policy()->clear_collection_set(); |
|
3466 |
|
3467 cleanup_surviving_young_words(); |
|
3468 |
|
3469 // Start a new incremental collection set for the next pause. |
|
3470 g1_policy()->start_incremental_cset_building(); |
|
3471 |
|
3472 // Clear the _cset_fast_test bitmap in anticipation of adding |
|
3473 // regions to the incremental collection set for the next |
|
3474 // evacuation pause. |
|
3475 clear_cset_fast_test(); |
|
3476 |
|
3477 _young_list->reset_sampled_info(); |
|
3478 |
|
3479 // Don't check the whole heap at this point as the |
|
3480 // GC alloc regions from this pause have been tagged |
|
3481 // as survivors and moved on to the survivor list. |
|
3482 // Survivor regions will fail the !is_young() check. |
|
3483 assert(check_young_list_empty(false /* check_heap */), |
|
3484 "young list should be empty"); |
|
3485 |
|
3486 #if YOUNG_LIST_VERBOSE |
|
3487 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
|
3488 _young_list->print(); |
|
3489 #endif // YOUNG_LIST_VERBOSE |
|
3490 |
|
3491 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
|
3492 _young_list->first_survivor_region(), |
|
3493 _young_list->last_survivor_region()); |
|
3494 |
|
3495 _young_list->reset_auxilary_lists(); |
|
3496 |
|
3497 if (evacuation_failed()) { |
|
3498 _summary_bytes_used = recalculate_used(); |
|
3499 } else { |
|
3500 // The "used" of the collection set have already been subtracted
|
3501 // when they were freed. Add in the bytes evacuated. |
|
3502 _summary_bytes_used += g1_policy()->bytes_copied_during_gc(); |
|
3503 } |
|
3504 |
|
3505 if (g1_policy()->during_initial_mark_pause()) { |
|
3506 concurrent_mark()->checkpointRootsInitialPost(); |
|
3507 set_marking_started(); |
|
3508 // CAUTION: after the doConcurrentMark() call below, |
|
3509 // the concurrent marking thread(s) could be running |
|
3510 // concurrently with us. Make sure that anything after |
|
3511 // this point does not assume that we are the only GC thread |
|
3512 // running. Note: of course, the actual marking work will |
|
3513 // not start until the safepoint itself is released in |
|
3514 // ConcurrentGCThread::safepoint_desynchronize(). |
|
3515 doConcurrentMark(); |
|
3516 } |
|
3517 |
|
3518 allocate_dummy_regions(); |
|
3519 |
|
3520 #if YOUNG_LIST_VERBOSE |
|
3521 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
|
3522 _young_list->print(); |
|
3523 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
|
3524 #endif // YOUNG_LIST_VERBOSE |
|
3525 |
|
3526 init_mutator_alloc_region(); |
|
3527 |
|
3528 { |
|
3529 size_t expand_bytes = g1_policy()->expansion_amount(); |
|
3530 if (expand_bytes > 0) { |
|
3531 size_t bytes_before = capacity(); |
|
3532 if (!expand(expand_bytes)) { |
|
3533 // We failed to expand the heap so let's verify that |
|
3534 // committed/uncommitted amount match the backing store |
|
3535 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); |
|
3536 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
|
3537 } |
3506 } |
3538 } |
3507 } |
|
3508 |
|
3509 // We have chosen the complete collection set. If marking is |
|
3510 // active then, we clear the region fields of any of the |
|
3511 // concurrent marking tasks whose region fields point into |
|
3512 // the collection set as these values will become stale. This |
|
3513 // will cause the owning marking threads to claim a new region |
|
3514 // when marking restarts. |
|
3515 if (mark_in_progress()) { |
|
3516 concurrent_mark()->reset_active_task_region_fields_in_cset(); |
|
3517 } |
|
3518 |
|
3519 #ifdef ASSERT |
|
3520 VerifyCSetClosure cl; |
|
3521 collection_set_iterate(&cl); |
|
3522 #endif // ASSERT |
|
3523 |
|
3524 setup_surviving_young_words(); |
|
3525 |
|
3526 // Initialize the GC alloc regions. |
|
3527 init_gc_alloc_regions(); |
|
3528 |
|
3529 // Actually do the work... |
|
3530 evacuate_collection_set(); |
|
3531 |
|
3532 free_collection_set(g1_policy()->collection_set()); |
|
3533 g1_policy()->clear_collection_set(); |
|
3534 |
|
3535 cleanup_surviving_young_words(); |
|
3536 |
|
3537 // Start a new incremental collection set for the next pause. |
|
3538 g1_policy()->start_incremental_cset_building(); |
|
3539 |
|
3540 // Clear the _cset_fast_test bitmap in anticipation of adding |
|
3541 // regions to the incremental collection set for the next |
|
3542 // evacuation pause. |
|
3543 clear_cset_fast_test(); |
|
3544 |
|
3545 _young_list->reset_sampled_info(); |
|
3546 |
|
3547 // Don't check the whole heap at this point as the |
|
3548 // GC alloc regions from this pause have been tagged |
|
3549 // as survivors and moved on to the survivor list. |
|
3550 // Survivor regions will fail the !is_young() check. |
|
3551 assert(check_young_list_empty(false /* check_heap */), |
|
3552 "young list should be empty"); |
|
3553 |
|
3554 #if YOUNG_LIST_VERBOSE |
|
3555 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
|
3556 _young_list->print(); |
|
3557 #endif // YOUNG_LIST_VERBOSE |
|
3558 |
|
3559 g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
|
3560 _young_list->first_survivor_region(), |
|
3561 _young_list->last_survivor_region()); |
|
3562 |
|
3563 _young_list->reset_auxilary_lists(); |
|
3564 |
|
3565 if (evacuation_failed()) { |
|
3566 _summary_bytes_used = recalculate_used(); |
|
3567 } else { |
|
3568 // The "used" of the the collection set have already been subtracted |
|
3569 // when they were freed. Add in the bytes evacuated. |
|
3570 _summary_bytes_used += g1_policy()->bytes_copied_during_gc(); |
|
3571 } |
|
3572 |
|
3573 if (g1_policy()->during_initial_mark_pause()) { |
|
3574 concurrent_mark()->checkpointRootsInitialPost(); |
|
3575 set_marking_started(); |
|
3576 // CAUTION: after the doConcurrentMark() call below, |
|
3577 // the concurrent marking thread(s) could be running |
|
3578 // concurrently with us. Make sure that anything after |
|
3579 // this point does not assume that we are the only GC thread |
|
3580 // running. Note: of course, the actual marking work will |
|
3581 // not start until the safepoint itself is released in |
|
3582 // ConcurrentGCThread::safepoint_desynchronize(). |
|
3583 doConcurrentMark(); |
|
3584 } |
|
3585 |
|
3586 allocate_dummy_regions(); |
|
3587 |
|
3588 #if YOUNG_LIST_VERBOSE |
|
3589 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
|
3590 _young_list->print(); |
|
3591 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
|
3592 #endif // YOUNG_LIST_VERBOSE |
|
3593 |
|
3594 init_mutator_alloc_region(); |
|
3595 |
|
3596 { |
|
3597 size_t expand_bytes = g1_policy()->expansion_amount(); |
|
3598 if (expand_bytes > 0) { |
|
3599 size_t bytes_before = capacity(); |
|
3600 if (!expand(expand_bytes)) { |
|
3601 // We failed to expand the heap so let's verify that |
|
3602 // committed/uncommitted amount match the backing store |
|
3603 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); |
|
3604 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
|
3605 } |
|
3606 } |
|
3607 } |
|
3608 |
|
3609 double end_time_sec = os::elapsedTime(); |
|
3610 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
|
3611 g1_policy()->record_pause_time_ms(pause_time_ms); |
|
3612 g1_policy()->record_collection_pause_end(); |
|
3613 |
|
3614 MemoryService::track_memory_usage(); |
|
3615 |
|
3616 // In prepare_for_verify() below we'll need to scan the deferred |
|
3617 // update buffers to bring the RSets up-to-date if |
|
3618 // G1HRRSFlushLogBuffersOnVerify has been set. While scanning |
|
3619 // the update buffers we'll probably need to scan cards on the |
|
3620 // regions we just allocated to (i.e., the GC alloc |
|
3621 // regions). However, during the last GC we called |
|
3622 // set_saved_mark() on all the GC alloc regions, so card |
|
3623 // scanning might skip the [saved_mark_word()...top()] area of |
|
3624 // those regions (i.e., the area we allocated objects into |
|
3625 // during the last GC). But it shouldn't. Given that |
|
3626 // saved_mark_word() is conditional on whether the GC time stamp |
|
3627 // on the region is current or not, by incrementing the GC time |
|
3628 // stamp here we invalidate all the GC time stamps on all the |
|
3629 // regions and saved_mark_word() will simply return top() for |
|
3630 // all the regions. This is a nicer way of ensuring this rather |
|
3631 // than iterating over the regions and fixing them. In fact, the |
|
3632 // GC time stamp increment here also ensures that |
|
3633 // saved_mark_word() will return top() between pauses, i.e., |
|
3634 // during concurrent refinement. So we don't need the |
|
3635 // is_gc_active() check to decide which top to use when
|
3636 // scanning cards (see CR 7039627). |
|
3637 increment_gc_time_stamp(); |
|
3638 |
|
3639 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
|
3640 HandleMark hm; // Discard invalid handles created during verification |
|
3641 gclog_or_tty->print(" VerifyAfterGC:"); |
|
3642 prepare_for_verify(); |
|
3643 Universe::verify(/* allow dirty */ true, |
|
3644 /* silent */ false, |
|
3645 /* option */ VerifyOption_G1UsePrevMarking); |
|
3646 } |
|
3647 |
|
3648 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); |
|
3649 ref_processor_stw()->verify_no_references_recorded(); |
|
3650 |
|
3651 // CM reference discovery will be re-enabled if necessary. |
3539 } |
3652 } |
3540 |
|
3541 double end_time_sec = os::elapsedTime(); |
|
3542 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
|
3543 g1_policy()->record_pause_time_ms(pause_time_ms); |
|
3544 g1_policy()->record_collection_pause_end(); |
|
3545 |
|
3546 MemoryService::track_memory_usage(); |
|
3547 |
|
3548 // In prepare_for_verify() below we'll need to scan the deferred |
|
3549 // update buffers to bring the RSets up-to-date if |
|
3550 // G1HRRSFlushLogBuffersOnVerify has been set. While scanning |
|
3551 // the update buffers we'll probably need to scan cards on the |
|
3552 // regions we just allocated to (i.e., the GC alloc |
|
3553 // regions). However, during the last GC we called |
|
3554 // set_saved_mark() on all the GC alloc regions, so card |
|
3555 // scanning might skip the [saved_mark_word()...top()] area of |
|
3556 // those regions (i.e., the area we allocated objects into |
|
3557 // during the last GC). But it shouldn't. Given that |
|
3558 // saved_mark_word() is conditional on whether the GC time stamp |
|
3559 // on the region is current or not, by incrementing the GC time |
|
3560 // stamp here we invalidate all the GC time stamps on all the |
|
3561 // regions and saved_mark_word() will simply return top() for |
|
3562 // all the regions. This is a nicer way of ensuring this rather |
|
3563 // than iterating over the regions and fixing them. In fact, the |
|
3564 // GC time stamp increment here also ensures that |
|
3565 // saved_mark_word() will return top() between pauses, i.e., |
|
3566 // during concurrent refinement. So we don't need the |
|
3567 // is_gc_active() check to decide which top to use when
|
3568 // scanning cards (see CR 7039627). |
|
3569 increment_gc_time_stamp(); |
|
3570 |
|
3571 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
|
3572 HandleMark hm; // Discard invalid handles created during verification |
|
3573 gclog_or_tty->print(" VerifyAfterGC:"); |
|
3574 prepare_for_verify(); |
|
3575 Universe::verify(/* allow dirty */ true, |
|
3576 /* silent */ false, |
|
3577 /* option */ VerifyOption_G1UsePrevMarking); |
|
3578 } |
|
3579 |
|
3580 if (was_enabled) ref_processor()->enable_discovery(); |
|
3581 |
3653 |
3582 { |
3654 { |
3583 size_t expand_bytes = g1_policy()->expansion_amount(); |
3655 size_t expand_bytes = g1_policy()->expansion_amount(); |
3584 if (expand_bytes > 0) { |
3656 if (expand_bytes > 0) { |
3585 size_t bytes_before = capacity(); |
3657 size_t bytes_before = capacity(); |
4667 OopClosure* non_root_closure) { |
4725 OopClosure* non_root_closure) { |
4668 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
4726 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
4669 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
4727 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
4670 } |
4728 } |
4671 |
4729 |
|
4730 // Weak Reference Processing support |
|
4731 |
|
4732 // An always "is_alive" closure that is used to preserve referents. |
|
4733 // If the object is non-null then it's alive. Used in the preservation |
|
4734 // of referent objects that are pointed to by reference objects |
|
4735 // discovered by the CM ref processor. |
|
4736 class G1AlwaysAliveClosure: public BoolObjectClosure { |
|
4737 G1CollectedHeap* _g1; |
|
4738 public: |
|
4739 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
|
4740 void do_object(oop p) { assert(false, "Do not call."); } |
|
4741 bool do_object_b(oop p) { |
|
4742 if (p != NULL) { |
|
4743 return true; |
|
4744 } |
|
4745 return false; |
|
4746 } |
|
4747 }; |
|
4748 |
|
4749 bool G1STWIsAliveClosure::do_object_b(oop p) { |
|
4750 // An object is reachable if it is outside the collection set, |
|
4751 // or is inside and copied. |
|
4752 return !_g1->obj_in_cs(p) || p->is_forwarded(); |
|
4753 } |
|
4754 |
|
4755 // Non Copying Keep Alive closure |
|
4756 class G1KeepAliveClosure: public OopClosure { |
|
4757 G1CollectedHeap* _g1; |
|
4758 public: |
|
4759 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
|
4760 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
|
4761 void do_oop( oop* p) { |
|
4762 oop obj = *p; |
|
4763 |
|
4764 if (_g1->obj_in_cs(obj)) { |
|
4765 assert( obj->is_forwarded(), "invariant" ); |
|
4766 *p = obj->forwardee(); |
|
4767 } |
|
4768 } |
|
4769 }; |
|
4770 |
|
4771 // Copying Keep Alive closure - can be called from both |
|
4772 // serial and parallel code as long as different worker |
|
4773 // threads utilize different G1ParScanThreadState instances |
|
4774 // and different queues. |
|
4775 |
|
4776 class G1CopyingKeepAliveClosure: public OopClosure { |
|
4777 G1CollectedHeap* _g1h; |
|
4778 OopClosure* _copy_non_heap_obj_cl; |
|
4779 OopsInHeapRegionClosure* _copy_perm_obj_cl; |
|
4780 G1ParScanThreadState* _par_scan_state; |
|
4781 |
|
4782 public: |
|
4783 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h, |
|
4784 OopClosure* non_heap_obj_cl, |
|
4785 OopsInHeapRegionClosure* perm_obj_cl, |
|
4786 G1ParScanThreadState* pss): |
|
4787 _g1h(g1h), |
|
4788 _copy_non_heap_obj_cl(non_heap_obj_cl), |
|
4789 _copy_perm_obj_cl(perm_obj_cl), |
|
4790 _par_scan_state(pss) |
|
4791 {} |
|
4792 |
|
4793 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
|
4794 virtual void do_oop( oop* p) { do_oop_work(p); } |
|
4795 |
|
4796 template <class T> void do_oop_work(T* p) { |
|
4797 oop obj = oopDesc::load_decode_heap_oop(p); |
|
4798 |
|
4799 if (_g1h->obj_in_cs(obj)) { |
|
4800 // If the referent object has been forwarded (either copied |
|
4801 // to a new location or to itself in the event of an |
|
4802 // evacuation failure) then we need to update the reference |
|
4803 // field and, if both reference and referent are in the G1 |
|
4804 // heap, update the RSet for the referent. |
|
4805 // |
|
4806 // If the referent has not been forwarded then we have to keep |
|
4807 // it alive by policy. Therefore we have to copy the referent.
|
4808 // |
|
4809 // If the reference field is in the G1 heap then we can push |
|
4810 // on the PSS queue. When the queue is drained (after each |
|
4811 // phase of reference processing) the object and its followers
|
4812 // will be copied, the reference field set to point to the |
|
4813 // new location, and the RSet updated. Otherwise we need to |
|
4814 // use the non-heap or perm closures directly to copy
|
4815 // the referent object and update the pointer, while avoiding
|
4816 // updating the RSet. |
|
4817 |
|
4818 if (_g1h->is_in_g1_reserved(p)) { |
|
4819 _par_scan_state->push_on_queue(p); |
|
4820 } else { |
|
4821 // The reference field is not in the G1 heap. |
|
4822 if (_g1h->perm_gen()->is_in(p)) { |
|
4823 _copy_perm_obj_cl->do_oop(p); |
|
4824 } else { |
|
4825 _copy_non_heap_obj_cl->do_oop(p); |
|
4826 } |
|
4827 } |
|
4828 } |
|
4829 } |
|
4830 }; |
|
4831 |
|
4832 // Serial drain queue closure. Called as the 'complete_gc' |
|
4833 // closure for each discovered list in some of the |
|
4834 // reference processing phases. |
|
4835 |
|
4836 class G1STWDrainQueueClosure: public VoidClosure { |
|
4837 protected: |
|
4838 G1CollectedHeap* _g1h; |
|
4839 G1ParScanThreadState* _par_scan_state; |
|
4840 |
|
4841 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } |
|
4842 |
|
4843 public: |
|
4844 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : |
|
4845 _g1h(g1h), |
|
4846 _par_scan_state(pss) |
|
4847 { } |
|
4848 |
|
4849 void do_void() { |
|
4850 G1ParScanThreadState* const pss = par_scan_state(); |
|
4851 pss->trim_queue(); |
|
4852 } |
|
4853 }; |
|
4854 |
|
4855 // Parallel Reference Processing closures |
|
4856 |
|
4857 // Implementation of AbstractRefProcTaskExecutor for parallel reference |
|
4858 // processing during G1 evacuation pauses. |
|
4859 |
|
4860 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor { |
|
4861 private: |
|
4862 G1CollectedHeap* _g1h; |
|
4863 RefToScanQueueSet* _queues; |
|
4864 WorkGang* _workers; |
|
4865 int _active_workers; |
|
4866 |
|
4867 public: |
|
4868 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h, |
|
4869 WorkGang* workers, |
|
4870 RefToScanQueueSet *task_queues, |
|
4871 int n_workers) : |
|
4872 _g1h(g1h), |
|
4873 _queues(task_queues), |
|
4874 _workers(workers), |
|
4875 _active_workers(n_workers) |
|
4876 { |
|
4877 assert(n_workers > 0, "shouldn't call this otherwise"); |
|
4878 } |
|
4879 |
|
4880 // Executes the given task using the STW GC worker threads.
|
4881 virtual void execute(ProcessTask& task); |
|
4882 virtual void execute(EnqueueTask& task); |
|
4883 }; |
|
4884 |
|
4885 // Gang task for possibly parallel reference processing |
|
4886 |
|
4887 class G1STWRefProcTaskProxy: public AbstractGangTask { |
|
4888 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
|
4889 ProcessTask& _proc_task; |
|
4890 G1CollectedHeap* _g1h; |
|
4891 RefToScanQueueSet *_task_queues; |
|
4892 ParallelTaskTerminator* _terminator; |
|
4893 |
|
4894 public: |
|
4895 G1STWRefProcTaskProxy(ProcessTask& proc_task, |
|
4896 G1CollectedHeap* g1h, |
|
4897 RefToScanQueueSet *task_queues, |
|
4898 ParallelTaskTerminator* terminator) : |
|
4899 AbstractGangTask("Process reference objects in parallel"), |
|
4900 _proc_task(proc_task), |
|
4901 _g1h(g1h), |
|
4902 _task_queues(task_queues), |
|
4903 _terminator(terminator) |
|
4904 {} |
|
4905 |
|
4906 virtual void work(int i) { |
|
4907 // The reference processing task executed by a single worker. |
|
4908 ResourceMark rm; |
|
4909 HandleMark hm; |
|
4910 |
|
4911 G1STWIsAliveClosure is_alive(_g1h); |
|
4912 |
|
4913 G1ParScanThreadState pss(_g1h, i); |
|
4914 |
|
4915 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL); |
|
4916 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); |
|
4917 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL); |
|
4918 |
|
4919 pss.set_evac_closure(&scan_evac_cl); |
|
4920 pss.set_evac_failure_closure(&evac_failure_cl); |
|
4921 pss.set_partial_scan_closure(&partial_scan_cl); |
|
4922 |
|
4923 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); |
|
4924 G1ParScanPermClosure only_copy_perm_cl(_g1h, &pss, NULL); |
|
4925 |
|
4926 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); |
|
4927 G1ParScanAndMarkPermClosure copy_mark_perm_cl(_g1h, &pss, NULL); |
|
4928 |
|
4929 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; |
|
4930 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl; |
|
4931 |
|
4932 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
|
4933 // We also need to mark copied objects. |
|
4934 copy_non_heap_cl = ©_mark_non_heap_cl; |
|
4935 copy_perm_cl = ©_mark_perm_cl; |
|
4936 } |
|
4937 |
|
4938 // Keep alive closure. |
|
4939 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss); |
|
4940 |
|
4941 // Complete GC closure |
|
4942 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator); |
|
4943 |
|
4944 // Call the reference processing task's work routine. |
|
4945 _proc_task.work(i, is_alive, keep_alive, drain_queue); |
|
4946 |
|
4947 // Note we cannot assert that the refs array is empty here as not all |
|
4948 // of the processing tasks (specifically phase2 - pp2_work) execute |
|
4949 // the complete_gc closure (which ordinarily would drain the queue) so |
|
4950 // the queue may not be empty. |
|
4951 } |
|
4952 }; |
|
4953 |
|
4954 // Driver routine for parallel reference processing. |
|
4955 // Creates an instance of the ref processing gang |
|
4956 // task and has the worker threads execute it. |
|
4957 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) { |
|
4958 assert(_workers != NULL, "Need parallel worker threads."); |
|
4959 |
|
4960 ParallelTaskTerminator terminator(_active_workers, _queues); |
|
4961 G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator); |
|
4962 |
|
4963 _g1h->set_par_threads(_active_workers); |
|
4964 _workers->run_task(&proc_task_proxy); |
|
4965 _g1h->set_par_threads(0); |
|
4966 } |
|
4967 |
|
4968 // Gang task for parallel reference enqueueing. |
|
4969 |
|
4970 class G1STWRefEnqueueTaskProxy: public AbstractGangTask { |
|
4971 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; |
|
4972 EnqueueTask& _enq_task; |
|
4973 |
|
4974 public: |
|
4975 G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) : |
|
4976 AbstractGangTask("Enqueue reference objects in parallel"), |
|
4977 _enq_task(enq_task) |
|
4978 { } |
|
4979 |
|
4980 virtual void work(int i) { |
|
4981 _enq_task.work(i); |
|
4982 } |
|
4983 }; |
|
4984 |
|
4985 // Driver routine for parallel reference enqueueing.
|
4986 // Creates an instance of the ref enqueueing gang |
|
4987 // task and has the worker threads execute it. |
|
4988 |
|
4989 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) { |
|
4990 assert(_workers != NULL, "Need parallel worker threads."); |
|
4991 |
|
4992 G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task); |
|
4993 |
|
4994 _g1h->set_par_threads(_active_workers); |
|
4995 _workers->run_task(&enq_task_proxy); |
|
4996 _g1h->set_par_threads(0); |
|
4997 } |
|
4998 |
|
4999 // End of weak reference support closures |
|
5000 |
|
5001 // Abstract task used to preserve (i.e. copy) any referent objects |
|
5002 // that are in the collection set and are pointed to by reference |
|
5003 // objects discovered by the CM ref processor. |
|
5004 |
|
5005 class G1ParPreserveCMReferentsTask: public AbstractGangTask { |
|
5006 protected: |
|
5007 G1CollectedHeap* _g1h; |
|
5008 RefToScanQueueSet *_queues; |
|
5009 ParallelTaskTerminator _terminator; |
|
5010 int _n_workers; |
|
5011 |
|
5012 public: |
|
5013 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) : |
|
5014 AbstractGangTask("ParPreserveCMReferents"), |
|
5015 _g1h(g1h), |
|
5016 _queues(task_queues), |
|
5017 _terminator(workers, _queues), |
|
5018 _n_workers(workers) |
|
5019 { } |
|
5020 |
|
5021 void work(int i) { |
|
5022 ResourceMark rm; |
|
5023 HandleMark hm; |
|
5024 |
|
5025 G1ParScanThreadState pss(_g1h, i); |
|
5026 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL); |
|
5027 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL); |
|
5028 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL); |
|
5029 |
|
5030 pss.set_evac_closure(&scan_evac_cl); |
|
5031 pss.set_evac_failure_closure(&evac_failure_cl); |
|
5032 pss.set_partial_scan_closure(&partial_scan_cl); |
|
5033 |
|
5034 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); |
|
5035 |
|
5036 |
|
5037 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); |
|
5038 G1ParScanPermClosure only_copy_perm_cl(_g1h, &pss, NULL); |
|
5039 |
|
5040 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); |
|
5041 G1ParScanAndMarkPermClosure copy_mark_perm_cl(_g1h, &pss, NULL); |
|
5042 |
|
5043 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; |
|
5044 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl; |
|
5045 |
|
5046 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
|
5047 // We also need to mark copied objects. |
|
5048 copy_non_heap_cl = ©_mark_non_heap_cl; |
|
5049 copy_perm_cl = ©_mark_perm_cl; |
|
5050 } |
|
5051 |
|
5052 // Is alive closure |
|
5053 G1AlwaysAliveClosure always_alive(_g1h); |
|
5054 |
|
5055 // Copying keep alive closure. Applied to referent objects that need |
|
5056 // to be copied. |
|
5057 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss); |
|
5058 |
|
5059 ReferenceProcessor* rp = _g1h->ref_processor_cm(); |
|
5060 |
|
5061 int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q(); |
|
5062 int stride = MIN2(MAX2(_n_workers, 1), limit); |
|
5063 |
|
5064 // limit is set using max_num_q() - which was set using ParallelGCThreads. |
|
5065 // So this must be true - but assert just in case someone decides to |
|
5066 // change the worker ids. |
|
5067 assert(0 <= i && i < limit, "sanity"); |
|
5068 assert(!rp->discovery_is_atomic(), "check this code"); |
|
5069 |
|
5070 // Select discovered lists [i, i+stride, i+2*stride,...,limit) |
|
5071 for (int idx = i; idx < limit; idx += stride) { |
|
5072 DiscoveredList& ref_list = rp->discovered_soft_refs()[idx]; |
|
5073 |
|
5074 DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive); |
|
5075 while (iter.has_next()) { |
|
5076 // Since discovery is not atomic for the CM ref processor, we |
|
5077 // can see some null referent objects. |
|
5078 iter.load_ptrs(DEBUG_ONLY(true)); |
|
5079 oop ref = iter.obj(); |
|
5080 |
|
5081 // This will filter nulls. |
|
5082 if (iter.is_referent_alive()) { |
|
5083 iter.make_referent_alive(); |
|
5084 } |
|
5085 iter.move_to_next(); |
|
5086 } |
|
5087 } |
|
5088 |
|
5089 // Drain the queue - which may cause stealing |
|
5090 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator); |
|
5091 drain_queue.do_void(); |
|
5092 // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure |
|
5093 assert(pss.refs()->is_empty(), "should be"); |
|
5094 } |
|
5095 }; |
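The stride loop in G1ParPreserveCMReferentsTask::work() above divides the CM ref processor's flat array of discovered lists (limit = number_of_subclasses_of_ref() * max_num_q() entries, in effect one list per reference type per worker queue) among the workers, so that each list is walked by exactly one worker. A standalone sketch of that partitioning; the list and worker counts below are assumed example values, not taken from HotSpot:

  #include <algorithm>
  #include <cstdio>

  int main() {
    const int num_ref_subclasses = 4;            // assumed (soft/weak/final/phantom)
    const int max_num_q          = 4;            // assumed, e.g. == ParallelGCThreads
    const int limit              = num_ref_subclasses * max_num_q;   // 16 lists
    const int n_workers          = 4;
    const int stride             = std::min(std::max(n_workers, 1), limit);

    // Worker i claims lists i, i+stride, i+2*stride, ... < limit.
    for (int i = 0; i < n_workers; i++) {
      std::printf("worker %d:", i);
      for (int idx = i; idx < limit; idx += stride) {
        std::printf(" %d", idx);
      }
      std::printf("\n");
    }
    return 0;
  }

With these numbers worker 0 walks lists 0, 4, 8, 12, worker 1 walks 1, 5, 9, 13, and so on, covering every index in [0, limit) exactly once.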
|
5096 |
|
5097 // Weak Reference processing during an evacuation pause (part 1). |
|
5098 void G1CollectedHeap::process_discovered_references() { |
|
5099 double ref_proc_start = os::elapsedTime(); |
|
5100 |
|
5101 ReferenceProcessor* rp = _ref_processor_stw; |
|
5102 assert(rp->discovery_enabled(), "should have been enabled"); |
|
5103 |
|
5104 // Any reference objects, in the collection set, that were 'discovered' |
|
5105 // by the CM ref processor should have already been copied (either by |
|
5106 // applying the external root copy closure to the discovered lists, or |
|
5107 // by following an RSet entry). |
|
5108 // |
|
5109 // But some of the referents, that are in the collection set, that these |
|
5110 // reference objects point to may not have been copied: the STW ref |
|
5111 // processor would have seen that the reference object had already |
|
5112 // been 'discovered' and would have skipped discovering the reference, |
|
5113 // but would not have treated the reference object as a regular oop. |
|
5114 // As a result the copy closure would not have been applied to the
|
5115 // referent object. |
|
5116 // |
|
5117 // We need to explicitly copy these referent objects - the references |
|
5118 // will be processed at the end of remarking. |
|
5119 // |
|
5120 // We also need to do this copying before we process the reference |
|
5121 // objects discovered by the STW ref processor in case one of these |
|
5122 // referents points to another object which is also referenced by an |
|
5123 // object discovered by the STW ref processor. |
|
5124 |
|
5125 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? |
|
5126 workers()->total_workers() : 1); |
|
5127 |
|
5128 set_par_threads(n_workers); |
|
5129 G1ParPreserveCMReferentsTask keep_cm_referents(this, n_workers, _task_queues); |
|
5130 |
|
5131 if (G1CollectedHeap::use_parallel_gc_threads()) { |
|
5132 workers()->run_task(&keep_cm_referents); |
|
5133 } else { |
|
5134 keep_cm_referents.work(0); |
|
5135 } |
|
5136 |
|
5137 set_par_threads(0); |
|
5138 |
|
5139 // Closure to test whether a referent is alive. |
|
5140 G1STWIsAliveClosure is_alive(this); |
|
5141 |
|
5142 // Even when parallel reference processing is enabled, the processing |
|
5143 // of JNI refs is serial and performed serially by the current thread |
|
5144 // rather than by a worker. The following PSS will be used for processing |
|
5145 // JNI refs. |
|
5146 |
|
5147 // Use only a single queue for this PSS. |
|
5148 G1ParScanThreadState pss(this, 0); |
|
5149 |
|
5150 // We do not embed a reference processor in the copying/scanning |
|
5151 // closures while we're actually processing the discovered |
|
5152 // reference objects. |
|
5153 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL); |
|
5154 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL); |
|
5155 G1ParScanPartialArrayClosure partial_scan_cl(this, &pss, NULL); |
|
5156 |
|
5157 pss.set_evac_closure(&scan_evac_cl); |
|
5158 pss.set_evac_failure_closure(&evac_failure_cl); |
|
5159 pss.set_partial_scan_closure(&partial_scan_cl); |
|
5160 |
|
5161 assert(pss.refs()->is_empty(), "pre-condition"); |
|
5162 |
|
5163 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL); |
|
5164 G1ParScanPermClosure only_copy_perm_cl(this, &pss, NULL); |
|
5165 |
|
5166 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL); |
|
5167 G1ParScanAndMarkPermClosure copy_mark_perm_cl(this, &pss, NULL); |
|
5168 |
|
5169 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; |
|
5170 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl; |
|
5171 |
|
5172 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
|
5173 // We also need to mark copied objects. |
|
5174 copy_non_heap_cl = ©_mark_non_heap_cl; |
|
5175 copy_perm_cl = ©_mark_perm_cl; |
|
5176 } |
|
5177 |
|
5178 // Keep alive closure. |
|
5179 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss); |
|
5180 |
|
5181 // Serial Complete GC closure |
|
5182 G1STWDrainQueueClosure drain_queue(this, &pss); |
|
5183 |
|
5184 // Setup the soft refs policy... |
|
5185 rp->setup_policy(false); |
|
5186 |
|
5187 if (!rp->processing_is_mt()) { |
|
5188 // Serial reference processing... |
|
5189 rp->process_discovered_references(&is_alive, |
|
5190 &keep_alive, |
|
5191 &drain_queue, |
|
5192 NULL); |
|
5193 } else { |
|
5194 // Parallel reference processing |
|
5195 int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
|
5196 assert(rp->num_q() == active_workers, "sanity"); |
|
5197 assert(active_workers <= rp->max_num_q(), "sanity"); |
|
5198 |
|
5199 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers); |
|
5200 rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor); |
|
5201 } |
|
5202 |
|
5203 // We have completed copying any necessary live referent objects |
|
5204 // (that were not copied during the actual pause) so we can |
|
5205 // retire any active alloc buffers |
|
5206 pss.retire_alloc_buffers(); |
|
5207 assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); |
|
5208 |
|
5209 double ref_proc_time = os::elapsedTime() - ref_proc_start; |
|
5210 g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0); |
|
5211 } |
|
5212 |
|
5213 // Weak Reference processing during an evacuation pause (part 2). |
|
5214 void G1CollectedHeap::enqueue_discovered_references() { |
|
5215 double ref_enq_start = os::elapsedTime(); |
|
5216 |
|
5217 ReferenceProcessor* rp = _ref_processor_stw; |
|
5218 assert(!rp->discovery_enabled(), "should have been disabled as part of processing"); |
|
5219 |
|
5220 // Now enqueue any remaining on the discovered lists on to |
|
5221 // the pending list. |
|
5222 if (!rp->processing_is_mt()) { |
|
5223 // Serial reference processing... |
|
5224 rp->enqueue_discovered_references(); |
|
5225 } else { |
|
5226 // Parallel reference enqueuing |
|
5227 |
|
5228 int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
|
5229 assert(rp->num_q() == active_workers, "sanity"); |
|
5230 assert(active_workers <= rp->max_num_q(), "sanity"); |
|
5231 |
|
5232 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers); |
|
5233 rp->enqueue_discovered_references(&par_task_executor); |
|
5234 } |
|
5235 |
|
5236 rp->verify_no_references_recorded(); |
|
5237 assert(!rp->discovery_enabled(), "should have been disabled"); |
|
5238 |
|
5239 // FIXME |
|
5240 // CM's reference processing also cleans up the string and symbol tables. |
|
5241 // Should we do that here also? We could, but it is a serial operation |
|
5242 // and could significantly increase the pause time.
|
5243 |
|
5244 double ref_enq_time = os::elapsedTime() - ref_enq_start; |
|
5245 g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0); |
|
5246 } |
|
5247 |
4672 void G1CollectedHeap::evacuate_collection_set() { |
5248 void G1CollectedHeap::evacuate_collection_set() { |
4673 set_evacuation_failed(false); |
5249 set_evacuation_failed(false); |
4674 |
5250 |
4675 g1_rem_set()->prepare_for_oops_into_collection_set_do(); |
5251 g1_rem_set()->prepare_for_oops_into_collection_set_do(); |
4676 concurrent_g1_refine()->set_use_cache(false); |
5252 concurrent_g1_refine()->set_use_cache(false); |