# HG changeset patch
# User drwhite
# Date 1445876544 14400
# Node ID 7afc768e4d62c11010e995985a02d0d8bad0a622
# Parent  2db29ded386543edda17601f288a498a87268109
8138920: Refactor the sampling thread from ConcurrentG1RefineThread
Summary: Helps enable running without concurrent refinement threads
Reviewed-by: brutisso, pliden

diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Tue Oct 27 15:09:19 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Mon Oct 26 12:22:24 2015 -0400
@@ -30,7 +30,8 @@
 #include "runtime/java.hpp"
 
 ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
-  _threads(NULL), _n_threads(0),
+  _threads(NULL),
+  _sample_thread(NULL),
   _hot_card_cache(g1h)
 {
   // Ergonomically select initial concurrent refinement parameters
@@ -58,12 +59,10 @@
     return NULL;
   }
   cg1r->_n_worker_threads = thread_num();
-  // We need one extra thread to do the young gen rset size sampling.
-  cg1r->_n_threads = cg1r->_n_worker_threads + 1;
 
   cg1r->reset_threshold_step();
 
-  cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_threads, mtGC);
+  cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_worker_threads, mtGC);
   if (cg1r->_threads == NULL) {
     *ecode = JNI_ENOMEM;
     vm_shutdown_during_initialization("Could not allocate an array for ConcurrentG1RefineThread");
@@ -73,7 +72,7 @@
   uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
 
   ConcurrentG1RefineThread *next = NULL;
-  for (uint i = cg1r->_n_threads - 1; i != UINT_MAX; i--) {
+  for (uint i = cg1r->_n_worker_threads - 1; i != UINT_MAX; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(cg1r, next, refine_closure, worker_id_offset, i);
     assert(t != NULL, "Conc refine should have been created");
     if (t->osthread() == NULL) {
@@ -86,6 +85,14 @@
     cg1r->_threads[i] = t;
     next = t;
   }
+
+  cg1r->_sample_thread = new G1YoungRemSetSamplingThread();
+  if (cg1r->_sample_thread->osthread() == NULL) {
+    *ecode = JNI_ENOMEM;
+    vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
+    return NULL;
+  }
+
   *ecode = JNI_OK;
   return cg1r;
 }
@@ -103,44 +110,36 @@
 }
 
 void ConcurrentG1Refine::stop() {
-  if (_threads != NULL) {
-    for (uint i = 0; i < _n_threads; i++) {
-      _threads[i]->stop();
-    }
+  for (uint i = 0; i < _n_worker_threads; i++) {
+    _threads[i]->stop();
   }
+  _sample_thread->stop();
 }
 
 void ConcurrentG1Refine::reinitialize_threads() {
   reset_threshold_step();
-  if (_threads != NULL) {
-    for (uint i = 0; i < _n_threads; i++) {
-      _threads[i]->initialize();
-    }
+  for (uint i = 0; i < _n_worker_threads; i++) {
+    _threads[i]->initialize();
   }
 }
 
 ConcurrentG1Refine::~ConcurrentG1Refine() {
-  if (_threads != NULL) {
-    for (uint i = 0; i < _n_threads; i++) {
-      delete _threads[i];
-    }
-    FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
+  for (uint i = 0; i < _n_worker_threads; i++) {
+    delete _threads[i];
   }
+  FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
+
+  delete _sample_thread;
 }
 
 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
-  if (_threads != NULL) {
-    for (uint i = 0; i < _n_threads; i++) {
-      tc->do_thread(_threads[i]);
-    }
-  }
+  worker_threads_do(tc);
+  tc->do_thread(_sample_thread);
 }
 
 void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
-  if (_threads != NULL) {
-    for (uint i = 0; i < worker_thread_num(); i++) {
-      tc->do_thread(_threads[i]);
-    }
+  for (uint i = 0; i < worker_thread_num(); i++) {
+    tc->do_thread(_threads[i]);
   }
 }
@@ -149,12 +148,10 @@
 }
 
 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
-  for (uint i = 0; i < _n_threads; ++i) {
+  for (uint i = 0; i < _n_worker_threads; ++i) {
     _threads[i]->print_on(st);
     st->cr();
   }
+  _sample_thread->print_on(st);
+  st->cr();
 }
-
-ConcurrentG1RefineThread * ConcurrentG1Refine::sampling_thread() const {
-  return _threads[worker_thread_num()];
-}
diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Tue Oct 27 15:09:19 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Mon Oct 26 12:22:24 2015 -0400
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_G1_CONCURRENTG1REFINE_HPP
 
 #include "gc/g1/g1HotCardCache.hpp"
+#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -39,8 +40,9 @@
 class DirtyCardQueue;
 
 class ConcurrentG1Refine: public CHeapObj<mtGC> {
+  G1YoungRemSetSamplingThread* _sample_thread;
+
   ConcurrentG1RefineThread** _threads;
-  uint _n_threads;
   uint _n_worker_threads;
  /*
   * The value of the update buffer queue length falls into one of 3 zones:
@@ -91,8 +93,8 @@
   // Iterate over all worker refinement threads
   void worker_threads_do(ThreadClosure * tc);
 
-  // The RS sampling thread
-  ConcurrentG1RefineThread * sampling_thread() const;
+  // The RS sampling thread has nothing to do with refinement, but is here for now.
+  G1YoungRemSetSamplingThread * sampling_thread() const { return _sample_thread; }
 
   static uint thread_num();
 
@@ -106,7 +108,6 @@
   int yellow_zone() const { return _yellow_zone; }
   int red_zone() const { return _red_zone; }
 
-  uint total_thread_num() const { return _n_threads; }
   uint worker_thread_num() const { return _n_worker_threads; }
 
   int thread_threshold_step() const { return _thread_threshold_step; }
diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Tue Oct 27 15:09:19 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Mon Oct 26 12:22:24 2015 -0400
@@ -50,9 +50,8 @@
   // Each thread has its own monitor. The i-th thread is responsible for signaling
   // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
   // thread. Monitors are also used to wake up the threads during termination.
-  // The 0th worker in notified by mutator threads and has a special monitor.
-  // The last worker is used for young gen rset size sampling.
-  if (worker_id > 0) {
+  // The 0th (primary) worker is notified by mutator threads and has a special monitor.
+  if (!is_primary()) {
     _monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true,
                            Monitor::_safepoint_check_never);
   } else {
@@ -66,61 +65,11 @@
 }
 
 void ConcurrentG1RefineThread::initialize() {
-  if (_worker_id < cg1r()->worker_thread_num()) {
-    // Current thread activation threshold
-    _threshold = MIN2(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
-                      cg1r()->yellow_zone());
-    // A thread deactivates once the number of buffer reached a deactivation threshold
-    _deactivation_threshold = MAX2(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
-  } else {
-    set_active(true);
-  }
-}
-
-void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
-  SuspendibleThreadSetJoiner sts_join;
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
-  if (g1p->adaptive_young_list_length()) {
-    int regions_visited = 0;
-    g1h->young_list()->rs_length_sampling_init();
-    while (g1h->young_list()->rs_length_sampling_more()) {
-      g1h->young_list()->rs_length_sampling_next();
-      ++regions_visited;
-
-      // we try to yield every time we visit 10 regions
-      if (regions_visited == 10) {
-        if (sts_join.should_yield()) {
-          sts_join.yield();
-          // we just abandon the iteration
-          break;
-        }
-        regions_visited = 0;
-      }
-    }
-
-    g1p->revise_young_list_target_length_if_necessary();
-  }
-}
-
-void ConcurrentG1RefineThread::run_young_rs_sampling() {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  _vtime_start = os::elapsedVTime();
-  while(!_should_terminate) {
-    sample_young_list_rs_lengths();
-
-    if (os::supports_vtime()) {
-      _vtime_accum = (os::elapsedVTime() - _vtime_start);
-    } else {
-      _vtime_accum = 0.0;
-    }
-
-    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-    if (_should_terminate) {
-      break;
-    }
-    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);
-  }
+  // Current thread activation threshold
+  _threshold = MIN2(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
+                    cg1r()->yellow_zone());
+  // A thread deactivates once the number of buffer reached a deactivation threshold
+  _deactivation_threshold = MAX2(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
 }
 
 void ConcurrentG1RefineThread::wait_for_completed_buffers() {
@@ -133,12 +82,12 @@
 
 bool ConcurrentG1RefineThread::is_active() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  return _worker_id > 0 ? _active : dcqs.process_completed_buffers();
+  return is_primary() ? dcqs.process_completed_buffers() : _active;
 }
 
 void ConcurrentG1RefineThread::activate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-  if (_worker_id > 0) {
+  if (!is_primary()) {
     if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
@@ -154,7 +103,7 @@
 
 void ConcurrentG1RefineThread::deactivate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-  if (_worker_id > 0) {
+  if (!is_primary()) {
     if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
@@ -171,25 +120,24 @@
   initialize_in_thread();
   wait_for_universe_init();
 
-  if (_worker_id >= cg1r()->worker_thread_num()) {
-    run_young_rs_sampling();
-    terminate();
-    return;
-  }
+  run_service();
+
+  terminate();
+}
 
+void ConcurrentG1RefineThread::run_service() {
   _vtime_start = os::elapsedVTime();
+
   while (!_should_terminate) {
-    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-
     // Wait for work
     wait_for_completed_buffers();
-
     if (_should_terminate) {
       break;
     }
 
     {
       SuspendibleThreadSetJoiner sts_join;
+      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 
       do {
         int curr_buffer_num = (int)dcqs.completed_buffers_num();
@@ -199,7 +147,7 @@
           dcqs.set_completed_queue_padding(0);
         }
 
-        if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
+        if (!is_primary() && curr_buffer_num <= _deactivation_threshold) {
           // If the number of the buffer has fallen below our threshold
           // we should deactivate. The predecessor will reactivate this
           // thread should the number of the buffers cross the threshold again.
@@ -225,8 +173,10 @@
       _vtime_accum = 0.0;
     }
   }
-  assert(_should_terminate, "just checking");
-  terminate();
+
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-stop");
+  }
 }
 
 void ConcurrentG1RefineThread::stop() {
@@ -236,10 +186,7 @@
     _should_terminate = true;
   }
 
-  {
-    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-    _monitor->notify();
-  }
+  stop_service();
 
   {
     MutexLockerEx mu(Terminator_lock);
@@ -247,8 +194,9 @@
       Terminator_lock->wait();
     }
   }
-  if (G1TraceConcRefinement) {
-    gclog_or_tty->print_cr("G1-Refine-stop");
-  }
 }
+void ConcurrentG1RefineThread::stop_service() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  _monitor->notify();
+}
\ No newline at end of file
diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Tue Oct 27 15:09:19 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Mon Oct 26 12:22:24 2015 -0400
@@ -31,14 +31,14 @@
 class CardTableEntryClosure;
 class ConcurrentG1Refine;
 
-// The G1 Concurrent Refinement Thread (could be several in the future).
-
+// One or more G1 Concurrent Refinement Threads may be active if concurrent
+// refinement is in progress.
 class ConcurrentG1RefineThread: public ConcurrentGCThread {
   friend class VMStructs;
   friend class G1CollectedHeap;
 
   double _vtime_start;  // Initial virtual time.
-  double _vtime_accum;  // Initial virtual time.
+  double _vtime_accum;  // Accumulated virtual time.
   uint _worker_id;
   uint _worker_id_offset;
@@ -59,8 +59,6 @@
   // This thread deactivation threshold
   int _deactivation_threshold;
 
-  void sample_young_list_rs_lengths();
-  void run_young_rs_sampling();
   void wait_for_completed_buffers();
 
   void set_active(bool x) { _active = x; }
@@ -68,6 +66,11 @@
   void activate();
   void deactivate();
 
+  bool is_primary() { return (_worker_id == 0); }
+
+  void run_service();
+  void stop_service();
+
 public:
   virtual void run();
   // Constructor
diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Tue Oct 27 15:09:19 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Mon Oct 26 12:22:24 2015 -0400
@@ -92,15 +92,31 @@
   }
 }
 
+// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
+void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
+  if (g1_policy->adaptive_young_list_length()) {
+    double now = os::elapsedTime();
+    double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
+                                  : g1_policy->predict_cleanup_time_ms();
+    G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
+    jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
+    os::sleep(this, sleep_time_ms, false);
+  }
+}
 void ConcurrentMarkThread::run() {
   initialize_in_thread();
+  wait_for_universe_init();
+
+  run_service();
+
+  terminate();
+}
+
+void ConcurrentMarkThread::run_service() {
   _vtime_start = os::elapsedVTime();
-  wait_for_universe_init();
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1_policy = g1h->g1_policy();
-  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
-  Thread *current_thread = Thread::current();
 
   while (!_should_terminate) {
     // wait until started is set.
@@ -141,12 +157,7 @@
           double mark_end_sec = os::elapsedTime();
           _vtime_mark_accum += (mark_end_time - cycle_start);
           if (!cm()->has_aborted()) {
-            if (g1_policy->adaptive_young_list_length()) {
-              double now = os::elapsedTime();
-              double remark_prediction_ms = g1_policy->predict_remark_time_ms();
-              jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
-              os::sleep(current_thread, sleep_time_ms, false);
-            }
+            delay_to_keep_mmu(g1_policy, true /* remark */);
 
             cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]",
                    mark_end_sec - mark_start_sec);
@@ -167,12 +178,7 @@
       _vtime_accum = (end_time - _vtime_start);
 
       if (!cm()->has_aborted()) {
-        if (g1_policy->adaptive_young_list_length()) {
-          double now = os::elapsedTime();
-          double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
-          jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
-          os::sleep(current_thread, sleep_time_ms, false);
-        }
+        delay_to_keep_mmu(g1_policy, false /* cleanup */);
 
         CMCleanUp cl_cl(_cm);
         VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
@@ -272,9 +278,6 @@
       g1h->register_concurrent_cycle_end();
     }
   }
-  assert(_should_terminate, "just checking");
-
-  terminate();
 }
 
 void ConcurrentMarkThread::stop() {
@@ -283,10 +286,7 @@
     _should_terminate = true;
   }
 
-  {
-    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-    CGC_lock->notify_all();
-  }
+  stop_service();
 
   {
     MutexLockerEx ml(Terminator_lock);
@@ -296,6 +296,11 @@
   }
 }
 
+void ConcurrentMarkThread::stop_service() {
+  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+  CGC_lock->notify_all();
+}
+
 void ConcurrentMarkThread::sleepBeforeNextCycle() {
   // We join here because we don't want to do the "shouldConcurrentMark()"
   // below while the world is otherwise stopped.
diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp	Tue Oct 27 15:09:19 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp	Mon Oct 26 12:22:24 2015 -0400
@@ -27,11 +27,11 @@
 
 #include "gc/shared/concurrentGCThread.hpp"
 
-// The Concurrent Mark GC Thread (could be several in the future).
-// This is copied from the Concurrent Mark Sweep GC Thread
-// Still under construction.
+// The Concurrent Mark GC Thread triggers the parallel CMConcurrentMarkingTasks
+// as well as handling various marking cleanup.
 class ConcurrentMark;
+class G1CollectorPolicy;
 
 class ConcurrentMarkThread: public ConcurrentGCThread {
   friend class VMStructs;
@@ -57,6 +57,10 @@
   volatile State _state;
 
   void sleepBeforeNextCycle();
+  void delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark);
+
+  void run_service();
+  void stop_service();
 
   static SurrogateLockerThread* _slt;
@@ -67,9 +71,9 @@
   static void makeSurrogateLockerThread(TRAPS);
   static SurrogateLockerThread* slt() { return _slt; }
 
-  // Total virtual time so far.
+  // Total virtual time so far for this thread and concurrent marking tasks.
   double vtime_accum();
-  // Marking virtual time so far
+  // Marking virtual time so far this thread and concurrent marking tasks.
   double vtime_mark_accum();
 
   ConcurrentMark* cm() { return _cm; }
diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Mon Oct 26 12:22:24 2015 -0400
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
+#include "gc/g1/suspendibleThreadSet.hpp"
+#include "runtime/mutexLocker.hpp"
+
+void G1YoungRemSetSamplingThread::run() {
+  initialize_in_thread();
+  wait_for_universe_init();
+
+  run_service();
+
+  terminate();
+}
+
+void G1YoungRemSetSamplingThread::stop() {
+  // it is ok to take late safepoints here, if needed
+  {
+    MutexLockerEx mu(Terminator_lock);
+    _should_terminate = true;
+  }
+
+  stop_service();
+
+  {
+    MutexLockerEx mu(Terminator_lock);
+    while (!_has_terminated) {
+      Terminator_lock->wait();
+    }
+  }
+}
+
+G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() : ConcurrentGCThread() {
+  _monitor = new Monitor(Mutex::nonleaf,
+                         "G1YoungRemSetSamplingThread monitor",
+                         true,
+                         Monitor::_safepoint_check_never);
+
+  create_and_start();
+
+  set_name("G1 Young RemSet Sampling");
+}
+
+void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  if (!_should_terminate) {
+    intx waitms = G1ConcRefinementServiceIntervalMillis; // 300, really should be?
+    _monitor->wait(Mutex::_no_safepoint_check_flag, waitms);
+  }
+}
+
+void G1YoungRemSetSamplingThread::run_service() {
+  double vtime_start = os::elapsedVTime();
+
+  while (!_should_terminate) {
+    sample_young_list_rs_lengths();
+
+    if (os::supports_vtime()) {
+      _vtime_accum = (os::elapsedVTime() - vtime_start);
+    } else {
+      _vtime_accum = 0.0;
+    }
+
+    sleep_before_next_cycle();
+  }
+}
+
+void G1YoungRemSetSamplingThread::stop_service() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  _monitor->notify();
+}
+
+void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
+  SuspendibleThreadSetJoiner sts;
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  if (g1p->adaptive_young_list_length()) {
+    int regions_visited = 0;
+    g1h->young_list()->rs_length_sampling_init();
+    while (g1h->young_list()->rs_length_sampling_more()) {
+      g1h->young_list()->rs_length_sampling_next();
+      ++regions_visited;
+
+      // we try to yield every time we visit 10 regions
+      if (regions_visited == 10) {
+        if (sts.should_yield()) {
+          sts.yield();
+          // we just abandon the iteration
+          break;
+        }
+        regions_visited = 0;
+      }
+    }
+
+    g1p->revise_young_list_target_length_if_necessary();
+  }
+}
diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp	Mon Oct 26 12:22:24 2015 -0400
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP
+#define SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+
+// The G1YoungRemSetSamplingThread is used to re-assess the validity of
+// the prediction for the remembered set lengths of the young generation.
+//
+// At the end of the GC G1 determines the length of the young gen based on
+// how much time the next GC can take, and when the next GC may occur
+// according to the MMU.
+//
+// The assumption is that a significant part of the GC is spent on scanning
+// the remembered sets (and many other components), so this thread constantly
+// reevaluates the prediction for the remembered set scanning costs, and potentially
+// G1CollectorPolicy resizes the young gen. This may do a premature GC or even
+// increase the young gen size to keep pause time length goal.
+class G1YoungRemSetSamplingThread: public ConcurrentGCThread {
+private:
+  Monitor* _monitor;
+
+  void sample_young_list_rs_lengths();
+
+  void run_service();
+  void stop_service();
+
+  void sleep_before_next_cycle();
+
+  double _vtime_accum;  // Accumulated virtual time.
+
+public:
+  G1YoungRemSetSamplingThread();
+  double vtime_accum() { return _vtime_accum; }
+
+  virtual void run();
+  void stop();
+};
+
+#endif /* SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP */
diff -r 2db29ded3865 -r 7afc768e4d62 hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp
--- a/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp	Tue Oct 27 15:09:19 2015 +0000
+++ b/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp	Mon Oct 26 12:22:24 2015 -0400
@@ -66,6 +66,7 @@
 }
 
 void ConcurrentGCThread::terminate() {
+  assert(_should_terminate, "Should only be called on terminate request.");
   // Signal that it is terminated
   {
     MutexLockerEx mu(Terminator_lock,