@@ -223,20 +223,16 @@
 
   _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
   _recent_avg_pause_time_ratio(0.0),
-  _num_markings(0),
-  _n_marks(0),
-  _n_pauses_at_mark_end(0),
 
   _all_full_gc_times_ms(new NumberSeq()),
 
   // G1PausesBtwnConcMark defaults to -1
   // so the hack is to do the cast QQQ FIXME
   _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
-  _n_marks_since_last_pause(0),
   _initiate_conc_mark_if_possible(false),
   _during_initial_mark_pause(false),
   _should_revert_to_full_young_gcs(false),
   _last_full_young_gc(false),
 
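A note on the sequence types in the initializer list above: NumberSeq accumulates every sample it is given, while a TruncatedSeq built with NumPrevPausesForHeuristics only has to answer questions about the most recent pauses, which is what the "recent" statistics here need. The stand-alone sketch below shows the windowed-average idea only; it is not the HotSpot TruncatedSeq, and every name in it is invented.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Illustrative ring buffer keeping the last 'length' samples (length > 0).
    class WindowedSeq {
      std::vector<double> _buf;
      size_t _next;    // slot that will be overwritten next
      size_t _count;   // valid samples, at most _buf.size()
      double _sum;     // running sum of the valid samples
    public:
      explicit WindowedSeq(size_t length)
        : _buf(length, 0.0), _next(0), _count(0), _sum(0.0) {}

      void add(double v) {
        if (_count == _buf.size()) {
          _sum -= _buf[_next];     // evict the oldest sample
        } else {
          _count++;
        }
        _sum += v;
        _buf[_next] = v;
        _next = (_next + 1) % _buf.size();
      }

      double avg() const { return _count == 0 ? 0.0 : _sum / (double)_count; }
    };

    int main() {
      WindowedSeq recent(3);                 // keep only the last three pauses
      const double samples[] = {10.0, 12.0, 50.0, 14.0};
      for (double ms : samples) recent.add(ms);
      std::printf("recent avg = %.1f ms\n", recent.avg());   // (12+50+14)/3 = 25.3
      return 0;
    }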
@@ -438,10 +434,11 @@
   // This will be set when the heap is expanded
   // for the first time during initialization.
   _reserve_regions = 0;
 
   initialize_all();
+  _collectionSetChooser = new CollectionSetChooser();
 }
 
 // Increment "i", mod "len"
 static void inc_mod(int& i, int len) {
   i++; if (i == len) i = 0;
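inc_mod above is a branch-based ring-buffer advance: it behaves like i = (i + 1) % len without the division. A tiny self-contained illustration of the wrap-around (the loop and constants here are made up):

    #include <cstdio>

    // Same shape as the helper above: wrap i back to 0 when it reaches len.
    static void inc_mod(int& i, int len) {
      i++; if (i == len) i = 0;
    }

    int main() {
      int i = 0;
      for (int n = 0; n < 6; n++) {
        inc_mod(i, 3);
        std::printf("%d ", i);   // prints: 1 2 0 1 2 0
      }
      std::printf("\n");
      return 0;
    }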
@@ -919,10 +916,11 @@
 
   _free_regions_at_end_of_collection = _g1->free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
+  _collectionSetChooser->updateAfterFullCollection();
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
   _stop_world_start = os::elapsedTime();
 }
@@ -1027,43 +1025,11 @@
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
   _mark_cleanup_start_sec = os::elapsedTime();
 }
 
-void
-G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                      size_t max_live_bytes) {
-  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
-  record_concurrent_mark_cleanup_end_work2();
-}
-
-void
-G1CollectorPolicy::
-record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
-                                         size_t max_live_bytes) {
-  if (_n_marks < 2) {
-    _n_marks++;
-  }
-}
-
-// The important thing about this is that it includes "os::elapsedTime".
-void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
-  double end_time_sec = os::elapsedTime();
-  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
-  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
-  _cur_mark_stop_world_time_ms += elapsed_time_ms;
-  _prev_collection_pause_end_ms += elapsed_time_ms;
-
-  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
-
-  _num_markings++;
-  _n_pauses_at_mark_end = _n_pauses;
-  _n_marks_since_last_pause++;
-}
-
-void
-G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
+void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   _should_revert_to_full_young_gcs = false;
   _last_full_young_gc = true;
   _in_marking_window = false;
 }
 
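Both the removed work2 path above and the rewritten cleanup code later in this patch report the cleanup pause to _mmu_tracker->add_pause(start, end, true). The MMU (minimum mutator utilization) tracker's job is to know how much GC time already falls inside the current pause-interval window, so the policy can keep total stop-the-world time within its budget. The sketch below only illustrates that windowed accounting, with assumed numbers (a 0.2 s GC budget per 1.0 s interval); it is not the G1MMUTracker implementation.

    #include <cstdio>
    #include <vector>

    struct Pause { double start_sec, end_sec; };

    // GC seconds that fall inside the window [now - interval, now].
    static double gc_time_in_window(const std::vector<Pause>& pauses,
                                    double now, double interval) {
      double lo = now - interval, sum = 0.0;
      for (const Pause& p : pauses) {
        double s = p.start_sec > lo  ? p.start_sec : lo;
        double e = p.end_sec   < now ? p.end_sec   : now;
        if (e > s) sum += e - s;
      }
      return sum;
    }

    int main() {
      std::vector<Pause> pauses = { {0.10, 0.25}, {0.80, 0.90} };
      double used = gc_time_in_window(pauses, 1.0, 1.0);   // 0.15 + 0.10 = 0.25 s
      std::printf("GC time in the last 1.0 s: %.2f s (budget 0.20 s)\n", used);
      return 0;
    }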
@@ -1499,15 +1465,13 @@
   if (update_stats) {
     summary->record_total_time_ms(elapsed_ms);
     summary->record_other_time_ms(other_time_ms);
   }
   for (int i = 0; i < _aux_num; ++i)
-    if (_cur_aux_times_set[i])
+    if (_cur_aux_times_set[i]) {
       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
-
-  // Reset marks-between-pauses counter.
-  _n_marks_since_last_pause = 0;
+    }
 
   // Update the efficiency-since-mark vars.
   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
     // This usually happens due to the timer not having the required
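The MIN_TIMER_GRANULARITY guard this hunk ends on matters because the efficiency being updated is a ratio: bytes reclaimed divided by processor time, where proc_ms scales the wall-clock pause by the number of GC threads. If the pause is shorter than the timer can resolve, the denominator is noise and the ratio would be wildly inflated. A minimal sketch of that guard, with invented names and thresholds:

    #include <cstdio>

    static const double kMinTimerGranularityMs = 0.01;   // assumed threshold

    // Bytes reclaimed per GC-thread millisecond, or 0.0 if the pause was too
    // short for the timer to measure reliably.
    static double collection_efficiency(double freed_bytes,
                                        double elapsed_ms,
                                        int gc_threads) {
      double proc_ms = elapsed_ms * (double)gc_threads;
      if (elapsed_ms < kMinTimerGranularityMs) {
        return 0.0;
      }
      return freed_bytes / proc_ms;
    }

    int main() {
      std::printf("%.0f bytes per GC-thread ms\n",
                  collection_efficiency(8.0e6, 10.0, 4));   // 200000
      return 0;
    }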
@@ -1727,10 +1691,12 @@
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>
+
+  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
 
 #define EXT_SIZE_FORMAT "%d%s"
 #define EXT_SIZE_PARAMS(bytes) \
   byte_size_in_proper_unit((bytes)), \
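To make the goal computation in this hunk concrete: _mmu_tracker->max_gc_time() is the GC time allowed per MMU interval, in seconds; MILLIUNITS converts it to milliseconds; and G1RSetUpdatingPauseTimePercent carves out the share of that budget allowed for updating remembered sets during a pause. The worked example below uses assumed inputs (a 0.2 s GC budget and a 10 percent share):

    #include <cstdio>

    int main() {
      // Assumed inputs, not taken from a real run.
      double max_gc_time_sec     = 0.2;      // allowed GC time per MMU interval
      double milliunits          = 1000.0;   // seconds -> milliseconds
      double rs_updating_percent = 10.0;     // share reserved for RS updating
      double update_rs_time_goal_ms =
        max_gc_time_sec * milliunits * rs_updating_percent / 100.0;
      std::printf("update_rs_time_goal_ms = %.1f\n", update_rs_time_goal_ms);  // 20.0
      return 0;
    }

The measured update_rs_time is then compared against this goal to decide how aggressively concurrent refinement should process buffers between pauses.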
@@ -2530,16 +2492,10 @@
                    ergo_format_reason("concurrent cycle already in progress"));
     }
   }
 }
 
-void
-G1CollectorPolicy_BestRegionsFirst::
-record_collection_pause_start(double start_time_sec, size_t start_used) {
-  G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
-}
-
 class KnownGarbageClosure: public HeapRegionClosure {
   CollectionSetChooser* _hrSorted;
 
 public:
   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
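KnownGarbageClosure, whose beginning is shown above, follows the HeapRegionClosure visitor pattern: _g1->heap_region_iterate() walks every region and hands each one to the closure, which decides whether to record it with the CollectionSetChooser. A stripped-down, stand-alone sketch of that shape (all types and the garbage test here are invented for illustration):

    #include <cstdio>
    #include <vector>

    struct Region { int index; bool mostly_garbage; };

    // Minimal stand-in for the closure/iterate pairing used in the code above.
    struct RegionClosure {
      virtual ~RegionClosure() {}
      virtual bool do_region(Region* r) = 0;   // return true to stop the walk
    };

    static void region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
      for (Region& r : heap) {
        if (cl->do_region(&r)) break;
      }
    }

    struct CandidateCollector : public RegionClosure {
      std::vector<int> candidates;
      bool do_region(Region* r) override {
        if (r->mostly_garbage) candidates.push_back(r->index);
        return false;                          // visit every region
      }
    };

    int main() {
      std::vector<Region> heap = { {0, true}, {1, false}, {2, true} };
      CandidateCollector cl;
      region_iterate(heap, &cl);
      std::printf("%zu candidate regions\n", cl.candidates.size());   // 2
      return 0;
    }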
@@ -2643,24 +2599,24 @@
     }
   }
 };
 
 void
-G1CollectorPolicy_BestRegionsFirst::
-record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                   size_t max_live_bytes) {
-  double start;
-  if (G1PrintParCleanupStats) start = os::elapsedTime();
-  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
+G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
+  double start_sec;
+  if (G1PrintParCleanupStats) {
+    start_sec = os::elapsedTime();
+  }
 
   _collectionSetChooser->clearMarkedHeapRegions();
-  double clear_marked_end;
+  double clear_marked_end_sec;
   if (G1PrintParCleanupStats) {
-    clear_marked_end = os::elapsedTime();
-    gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
-                           (clear_marked_end - start)*1000.0);
+    clear_marked_end_sec = os::elapsedTime();
+    gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.",
+                           (clear_marked_end_sec - start_sec) * 1000.0);
   }
+
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     const size_t OverpartitionFactor = 4;
     const size_t MinWorkUnit = 8;
     const size_t WorkUnit =
       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
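The WorkUnit computation that this excerpt breaks off in sizes the chunks of the region array handed to parallel GC threads: over-partition by a factor of four so the threads can load-balance, but never hand out a chunk smaller than MinWorkUnit. A worked example with assumed figures (2048 regions, 8 GC threads):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      // Assumed heap shape; OverpartitionFactor and MinWorkUnit match the hunk above.
      size_t n_regions  = 2048;
      size_t gc_threads = 8;
      const size_t OverpartitionFactor = 4;
      const size_t MinWorkUnit = 8;
      size_t work_unit =
        std::max(n_regions / (gc_threads * OverpartitionFactor), MinWorkUnit);
      std::printf("work unit = %zu regions\n", work_unit);   // max(64, 8) = 64
      return 0;
    }

With a small heap, say 64 regions and 8 threads, the MinWorkUnit floor wins instead: max(2, 8) = 8 regions per chunk.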
@@ -2675,31 +2631,29 @@
          "sanity check");
   } else {
     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
     _g1->heap_region_iterate(&knownGarbagecl);
   }
-  double known_garbage_end;
+  double known_garbage_end_sec;
   if (G1PrintParCleanupStats) {
-    known_garbage_end = os::elapsedTime();
+    known_garbage_end_sec = os::elapsedTime();
     gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
-                           (known_garbage_end - clear_marked_end)*1000.0);
+                           (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
   }
+
   _collectionSetChooser->sortMarkedHeapRegions();
-  double sort_end;
+  double end_sec = os::elapsedTime();
   if (G1PrintParCleanupStats) {
-    sort_end = os::elapsedTime();
     gclog_or_tty->print_cr(" sorting: %8.3f ms.",
-                           (sort_end - known_garbage_end)*1000.0);
+                           (end_sec - known_garbage_end_sec) * 1000.0);
   }
 
-  record_concurrent_mark_cleanup_end_work2();
-  double work2_end;
-  if (G1PrintParCleanupStats) {
-    work2_end = os::elapsedTime();
-    gclog_or_tty->print_cr(" work2: %8.3f ms.",
-                           (work2_end - sort_end)*1000.0);
-  }
+  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
+  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
+  _cur_mark_stop_world_time_ms += elapsed_time_ms;
+  _prev_collection_pause_end_ms += elapsed_time_ms;
+  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
 }
 
 // Add the heap region at the head of the non-incremental collection set
 void G1CollectorPolicy::
 add_to_collection_set(HeapRegion* hr) {
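The rewritten cleanup-end above takes one timestamp per phase (only when G1PrintParCleanupStats asks for the per-phase log lines), then converts the overall span into milliseconds and feeds it to the pause history and the MMU tracker; note that the overall span is measured from _mark_cleanup_start_sec, captured earlier by record_concurrent_mark_cleanup_start(). The stand-alone sketch below shows only that timing pattern; the clock source and the print sinks are stand-ins, not HotSpot APIs.

    #include <chrono>
    #include <cstdio>

    // Seconds since the first call; plays the role of an elapsed-time clock.
    static double elapsed_sec() {
      using namespace std::chrono;
      static const steady_clock::time_point t0 = steady_clock::now();
      return duration<double>(steady_clock::now() - t0).count();
    }

    int main() {
      bool print_stats = true;                 // stands in for G1PrintParCleanupStats
      double start_sec = elapsed_sec();

      // ... phase 1: clear marked regions ...
      double clear_end_sec = elapsed_sec();
      if (print_stats) {
        std::printf(" clear marked regions: %8.3f ms.\n",
                    (clear_end_sec - start_sec) * 1000.0);
      }

      // ... phase 2: sort the candidate regions ...
      double end_sec = elapsed_sec();
      if (print_stats) {
        std::printf(" sorting: %8.3f ms.\n", (end_sec - clear_end_sec) * 1000.0);
      }

      // The overall span is what would go into the pause history and MMU tracker.
      double elapsed_time_ms = (end_sec - start_sec) * 1000.0;
      std::printf(" total cleanup: %8.3f ms.\n", elapsed_time_ms);
      return 0;
    }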
@@ -3113,16 +3065,5 @@
 
   double non_young_end_time_sec = os::elapsedTime();
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
 }
-
-void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
-  G1CollectorPolicy::record_full_collection_end();
-  _collectionSetChooser->updateAfterFullCollection();
-}
-
-void G1CollectorPolicy_BestRegionsFirst::
-record_collection_pause_end() {
-  G1CollectorPolicy::record_collection_pause_end();
-  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
-}