8200371: In g1, rename ConcurrentMarkThread to G1ConcurrentMarkThread
Reviewed-by: tschatzl, sangheki
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp Tue Apr 03 13:15:27 2018 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,418 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderData.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
-#include "gc/g1/g1Analytics.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1MMUTracker.hpp"
-#include "gc/g1/g1Policy.hpp"
-#include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/vm_operations_g1.hpp"
-#include "gc/shared/concurrentGCPhaseManager.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "logging/log.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/debug.hpp"
-
-// ======= Concurrent Mark Thread ========
-
-// Check order in EXPAND_CURRENT_PHASES
-STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
- ConcurrentGCPhaseManager::IDLE_PHASE);
-
-#define EXPAND_CONCURRENT_PHASES(expander) \
- expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL) \
- expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL) \
- expander(CONCURRENT_CYCLE,, "Concurrent Cycle") \
- expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks") \
- expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions") \
- expander(CONCURRENT_MARK,, "Concurrent Mark") \
- expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots") \
- expander(BEFORE_REMARK,, NULL) \
- expander(REMARK,, NULL) \
- expander(REBUILD_REMEMBERED_SETS,, "Concurrent Rebuild Remembered Sets") \
- expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark") \
- /* */
-
-class G1ConcurrentPhase : public AllStatic {
-public:
- enum {
-#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
- EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
-#undef CONCURRENT_PHASE_ENUM
- PHASE_ID_LIMIT
- };
-};
-
-// The CM thread is created when the G1 garbage collector is used
-
-ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
- ConcurrentGCThread(),
- _cm(cm),
- _state(Idle),
- _phase_manager_stack(),
- _vtime_accum(0.0),
- _vtime_mark_accum(0.0) {
-
- set_name("G1 Main Marker");
- create_and_start();
-}
-
-class CMRemark : public VoidClosure {
- G1ConcurrentMark* _cm;
-public:
- CMRemark(G1ConcurrentMark* cm) : _cm(cm) {}
-
- void do_void(){
- _cm->remark();
- }
-};
-
-class CMCleanup : public VoidClosure {
- G1ConcurrentMark* _cm;
-public:
- CMCleanup(G1ConcurrentMark* cm) : _cm(cm) {}
-
- void do_void(){
- _cm->cleanup();
- }
-};
-
-double ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark) {
- // There are 3 reasons to use SuspendibleThreadSetJoiner.
- // 1. To avoid concurrency problem.
- // - G1MMUTracker::add_pause(), when_sec() and its variation(when_ms() etc..) can be called
- // concurrently from ConcurrentMarkThread and VMThread.
- // 2. If currently a gc is running, but it has not yet updated the MMU,
- // we will not forget to consider that pause in the MMU calculation.
- // 3. If currently a gc is running, ConcurrentMarkThread will wait it to be finished.
- // And then sleep for predicted amount of time by delay_to_keep_mmu().
- SuspendibleThreadSetJoiner sts_join;
-
- const G1Analytics* analytics = g1_policy->analytics();
- double now = os::elapsedTime();
- double prediction_ms = remark ? analytics->predict_remark_time_ms()
- : analytics->predict_cleanup_time_ms();
- G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
- return mmu_tracker->when_ms(now, prediction_ms);
-}
-
-void ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
- if (g1_policy->adaptive_young_list_length()) {
- jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
- if (!_cm->has_aborted() && sleep_time_ms > 0) {
- os::sleep(this, sleep_time_ms, false);
- }
- }
-}
-
-class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
- G1ConcurrentMark* _cm;
-
- public:
- G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
- GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
- _cm(cm)
- {
- _cm->gc_timer_cm()->register_gc_concurrent_start(title);
- }
-
- ~G1ConcPhaseTimer() {
- _cm->gc_timer_cm()->register_gc_concurrent_end();
- }
-};
-
-static const char* const concurrent_phase_names[] = {
-#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
- EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
-#undef CONCURRENT_PHASE_NAME
- NULL // terminator
-};
-// Verify dense enum assumption. +1 for terminator.
-STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
- ARRAY_SIZE(concurrent_phase_names));
-
-// Returns the phase number for name, or a negative value if unknown.
-static int lookup_concurrent_phase(const char* name) {
- const char* const* names = concurrent_phase_names;
- for (uint i = 0; names[i] != NULL; ++i) {
- if (strcmp(name, names[i]) == 0) {
- return static_cast<int>(i);
- }
- }
- return -1;
-}
-
-// The phase must be valid and must have a title.
-static const char* lookup_concurrent_phase_title(int phase) {
- static const char* const titles[] = {
-#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
- EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
-#undef CONCURRENT_PHASE_TITLE
- };
- // Verify dense enum assumption.
- STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));
-
- assert(0 <= phase, "precondition");
- assert((uint)phase < ARRAY_SIZE(titles), "precondition");
- const char* title = titles[phase];
- assert(title != NULL, "precondition");
- return title;
-}
-
-class G1ConcPhaseManager : public StackObj {
- G1ConcurrentMark* _cm;
- ConcurrentGCPhaseManager _manager;
-
-public:
- G1ConcPhaseManager(int phase, ConcurrentMarkThread* thread) :
- _cm(thread->cm()),
- _manager(phase, thread->phase_manager_stack())
- { }
-
- ~G1ConcPhaseManager() {
- // Deactivate the manager if marking aborted, to avoid blocking on
- // phase exit when the phase has been requested.
- if (_cm->has_aborted()) {
- _manager.deactivate();
- }
- }
-
- void set_phase(int phase, bool force) {
- _manager.set_phase(phase, force);
- }
-};
-
-// Combine phase management and timing into one convenient utility.
-class G1ConcPhase : public StackObj {
- G1ConcPhaseTimer _timer;
- G1ConcPhaseManager _manager;
-
-public:
- G1ConcPhase(int phase, ConcurrentMarkThread* thread) :
- _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
- _manager(phase, thread)
- { }
-};
-
-const char* const* ConcurrentMarkThread::concurrent_phases() const {
- return concurrent_phase_names;
-}
-
-bool ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
- int phase = lookup_concurrent_phase(phase_name);
- if (phase < 0) return false;
-
- while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
- phase_manager_stack())) {
- assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
- if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
- // If idle and the goal is !idle, start a collection.
- G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
- }
- }
- return true;
-}
-
-void ConcurrentMarkThread::run_service() {
- _vtime_start = os::elapsedVTime();
-
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- G1Policy* g1_policy = g1h->g1_policy();
-
- G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);
-
- while (!should_terminate()) {
- // wait until started is set.
- sleep_before_next_cycle();
- if (should_terminate()) {
- break;
- }
-
- cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);
-
- GCIdMark gc_id_mark;
-
- _cm->concurrent_cycle_start();
-
- GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
- {
- ResourceMark rm;
- HandleMark hm;
- double cycle_start = os::elapsedVTime();
-
- {
- G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
- ClassLoaderDataGraph::clear_claimed_marks();
- }
-
- // We have to ensure that we finish scanning the root regions
- // before the next GC takes place. To ensure this we have to
- // make sure that we do not join the STS until the root regions
- // have been scanned. If we did then it's possible that a
- // subsequent GC could block us from joining the STS and proceed
- // without the root regions have been scanned which would be a
- // correctness issue.
-
- {
- G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
- _cm->scan_root_regions();
- }
-
- // It would be nice to use the G1ConcPhase class here but
- // the "end" logging is inside the loop and not at the end of
- // a scope. Also, the timer doesn't support nesting.
- // Mimicking the same log output instead.
- {
- G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
- jlong mark_start = os::elapsed_counter();
- const char* cm_title = lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
- log_info(gc, marking)("%s (%.3fs)",
- cm_title,
- TimeHelper::counter_to_seconds(mark_start));
- for (uint iter = 1; !_cm->has_aborted(); ++iter) {
- // Concurrent marking.
- {
- G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
- _cm->mark_from_roots();
- }
- if (_cm->has_aborted()) {
- break;
- }
-
- // Provide a control point after mark_from_roots.
- {
- G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
- }
- if (_cm->has_aborted()) {
- break;
- }
-
- // Delay remark pause for MMU.
- double mark_end_time = os::elapsedVTime();
- jlong mark_end = os::elapsed_counter();
- _vtime_mark_accum += (mark_end_time - cycle_start);
- delay_to_keep_mmu(g1_policy, true /* remark */);
- if (_cm->has_aborted()) {
- break;
- }
-
- // Pause Remark.
- log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
- cm_title,
- TimeHelper::counter_to_seconds(mark_start),
- TimeHelper::counter_to_seconds(mark_end),
- TimeHelper::counter_to_millis(mark_end - mark_start));
- mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
- CMRemark cl(_cm);
- VM_CGC_Operation op(&cl, "Pause Remark");
- VMThread::execute(&op);
- if (_cm->has_aborted()) {
- break;
- } else if (!_cm->restart_for_overflow()) {
- break; // Exit loop if no restart requested.
- } else {
- // Loop to restart for overflow.
- mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
- log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
- cm_title, iter);
- }
- }
- }
-
- if (!_cm->has_aborted()) {
- G1ConcPhase p(G1ConcurrentPhase::REBUILD_REMEMBERED_SETS, this);
- _cm->rebuild_rem_set_concurrently();
- }
-
- double end_time = os::elapsedVTime();
- // Update the total virtual time before doing this, since it will try
- // to measure it to get the vtime for this marking.
- _vtime_accum = (end_time - _vtime_start);
-
- if (!_cm->has_aborted()) {
- delay_to_keep_mmu(g1_policy, false /* cleanup */);
- }
-
- if (!_cm->has_aborted()) {
- CMCleanup cl_cl(_cm);
- VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
- VMThread::execute(&op);
- }
-
- // We now want to allow clearing of the marking bitmap to be
- // suspended by a collection pause.
- // We may have aborted just before the remark. Do not bother clearing the
- // bitmap then, as it has been done during mark abort.
- if (!_cm->has_aborted()) {
- G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
- _cm->cleanup_for_next_mark();
- } else {
- assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
- }
- }
-
- // Update the number of full collections that have been
- // completed. This will also notify the FullGCCount_lock in case a
- // Java thread is waiting for a full GC to happen (e.g., it
- // called System.gc() with +ExplicitGCInvokesConcurrent).
- {
- SuspendibleThreadSetJoiner sts_join;
- g1h->increment_old_marking_cycles_completed(true /* concurrent */);
-
- _cm->concurrent_cycle_end();
- }
-
- cpmanager.set_phase(G1ConcurrentPhase::IDLE, _cm->has_aborted() /* force */);
- }
- _cm->root_regions()->cancel_scan();
-}
-
-void ConcurrentMarkThread::stop_service() {
- MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
- CGC_lock->notify_all();
-}
-
-void ConcurrentMarkThread::sleep_before_next_cycle() {
- // We join here because we don't want to do the "shouldConcurrentMark()"
- // below while the world is otherwise stopped.
- assert(!in_progress(), "should have been cleared");
-
- MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
- while (!started() && !should_terminate()) {
- CGC_lock->wait(Mutex::_no_safepoint_check_flag);
- }
-
- if (started()) {
- set_in_progress();
- }
-}
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.hpp Tue Apr 03 13:15:27 2018 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
-#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
-
-#include "gc/shared/concurrentGCPhaseManager.hpp"
-#include "gc/shared/concurrentGCThread.hpp"
-
-class G1ConcurrentMark;
-class G1Policy;
-
-// The concurrent mark thread triggers the various steps of the concurrent marking
-// cycle, including various marking cleanup.
-class ConcurrentMarkThread: public ConcurrentGCThread {
- friend class VMStructs;
-
- double _vtime_start; // Initial virtual time.
- double _vtime_accum; // Accumulated virtual time.
- double _vtime_mark_accum;
-
- G1ConcurrentMark* _cm;
-
- enum State {
- Idle,
- Started,
- InProgress
- };
-
- volatile State _state;
-
- // WhiteBox testing support.
- ConcurrentGCPhaseManager::Stack _phase_manager_stack;
-
- void sleep_before_next_cycle();
- // Delay marking to meet MMU.
- void delay_to_keep_mmu(G1Policy* g1_policy, bool remark);
- double mmu_sleep_time(G1Policy* g1_policy, bool remark);
-
- void run_service();
- void stop_service();
-
- public:
- // Constructor
- ConcurrentMarkThread(G1ConcurrentMark* cm);
-
- // Total virtual time so far for this thread and concurrent marking tasks.
- double vtime_accum();
- // Marking virtual time so far this thread and concurrent marking tasks.
- double vtime_mark_accum();
-
- G1ConcurrentMark* cm() { return _cm; }
-
- void set_idle() { assert(_state != Started, "must not be starting a new cycle"); _state = Idle; }
- bool idle() { return _state == Idle; }
- void set_started() { assert(_state == Idle, "cycle in progress"); _state = Started; }
- bool started() { return _state == Started; }
- void set_in_progress() { assert(_state == Started, "must be starting a cycle"); _state = InProgress; }
- bool in_progress() { return _state == InProgress; }
-
- // Returns true from the moment a marking cycle is
- // initiated (during the initial-mark pause when started() is set)
- // to the moment when the cycle completes (just after the next
- // marking bitmap has been cleared and in_progress() is
- // cleared). While during_cycle() is true we will not start another cycle
- // so that cycles do not overlap. We cannot use just in_progress()
- // as the CM thread might take some time to wake up before noticing
- // that started() is set and set in_progress().
- bool during_cycle() { return !idle(); }
-
- // WhiteBox testing support.
- const char* const* concurrent_phases() const;
- bool request_concurrent_phase(const char* phase);
-
- ConcurrentGCPhaseManager::Stack* phase_manager_stack() {
- return &_phase_manager_stack;
- }
-};
-
-#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.inline.hpp Tue Apr 03 13:15:27 2018 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
-#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
-
-#include "gc/g1/concurrentMarkThread.hpp"
-#include "gc/g1/g1ConcurrentMark.hpp"
-
- // Total virtual time so far.
-inline double ConcurrentMarkThread::vtime_accum() {
- return _vtime_accum + _cm->all_task_accum_vtime();
-}
-
-// Marking virtual time so far
-inline double ConcurrentMarkThread::vtime_mark_accum() {
- return _vtime_mark_accum + _cm->all_task_accum_vtime();
-}
-
-#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue Apr 03 12:05:49 2018 +0200
@@ -29,7 +29,6 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/bufferingOopClosure.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
@@ -37,6 +36,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Tue Apr 03 12:05:49 2018 +0200
@@ -79,7 +79,7 @@
class G1YoungRemSetSamplingThread;
class HeapRegionRemSetIterator;
class G1ConcurrentMark;
-class ConcurrentMarkThread;
+class G1ConcurrentMarkThread;
class G1ConcurrentRefine;
class GenerationCounters;
class STWGCTimer;
@@ -766,7 +766,7 @@
// The concurrent marker (and the thread it runs in.)
G1ConcurrentMark* _cm;
- ConcurrentMarkThread* _cmThread;
+ G1ConcurrentMarkThread* _cmThread;
// The concurrent refiner.
G1ConcurrentRefine* _cr;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Tue Apr 03 12:05:49 2018 +0200
@@ -26,10 +26,10 @@
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
@@ -398,7 +398,7 @@
_mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
// Create & start ConcurrentMark thread.
- _cm_thread = new ConcurrentMarkThread(this);
+ _cm_thread = new G1ConcurrentMarkThread(this);
if (_cm_thread->osthread() == NULL) {
vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
}
@@ -824,6 +824,7 @@
class G1CMConcurrentMarkingTask : public AbstractGangTask {
G1ConcurrentMark* _cm;
+
public:
void work(uint worker_id) {
assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Tue Apr 03 12:05:49 2018 +0200
@@ -33,7 +33,7 @@
#include "memory/allocation.hpp"
class ConcurrentGCTimer;
-class ConcurrentMarkThread;
+class G1ConcurrentMarkThread;
class G1CollectedHeap;
class G1CMTask;
class G1ConcurrentMark;
@@ -277,7 +277,7 @@
// This class manages data structures and methods for doing liveness analysis in
// G1's concurrent cycle.
class G1ConcurrentMark : public CHeapObj<mtGC> {
- friend class ConcurrentMarkThread;
+ friend class G1ConcurrentMarkThread;
friend class G1CMRefProcTaskProxy;
friend class G1CMRefProcTaskExecutor;
friend class G1CMKeepAliveAndDrainClosure;
@@ -287,35 +287,35 @@
friend class G1CMRemarkTask;
friend class G1CMTask;
- ConcurrentMarkThread* _cm_thread; // The thread doing the work
- G1CollectedHeap* _g1h; // The heap
- bool _completed_initialization; // Set to true when initialization is complete
+ G1ConcurrentMarkThread* _cm_thread; // The thread doing the work
+ G1CollectedHeap* _g1h; // The heap
+ bool _completed_initialization; // Set to true when initialization is complete
// Concurrent marking support structures
- G1CMBitMap _mark_bitmap_1;
- G1CMBitMap _mark_bitmap_2;
- G1CMBitMap* _prev_mark_bitmap; // Completed mark bitmap
- G1CMBitMap* _next_mark_bitmap; // Under-construction mark bitmap
+ G1CMBitMap _mark_bitmap_1;
+ G1CMBitMap _mark_bitmap_2;
+ G1CMBitMap* _prev_mark_bitmap; // Completed mark bitmap
+ G1CMBitMap* _next_mark_bitmap; // Under-construction mark bitmap
// Heap bounds
- MemRegion const _heap;
+ MemRegion const _heap;
// Root region tracking and claiming
- G1CMRootRegions _root_regions;
+ G1CMRootRegions _root_regions;
// For grey objects
- G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
- HeapWord* volatile _finger; // The global finger, region aligned,
- // always pointing to the end of the
- // last claimed region
+ G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
+ HeapWord* volatile _finger; // The global finger, region aligned,
+ // always pointing to the end of the
+ // last claimed region
- uint _worker_id_offset;
- uint _max_num_tasks; // Maximum number of marking tasks
- uint _num_active_tasks; // Number of tasks currently active
- G1CMTask** _tasks; // Task queue array (max_worker_id length)
+ uint _worker_id_offset;
+ uint _max_num_tasks; // Maximum number of marking tasks
+ uint _num_active_tasks; // Number of tasks currently active
+ G1CMTask** _tasks; // Task queue array (max_worker_id length)
- G1CMTaskQueueSet* _task_queues; // Task queue set
- ParallelTaskTerminator _terminator; // For termination
+ G1CMTaskQueueSet* _task_queues; // Task queue set
+ ParallelTaskTerminator _terminator; // For termination
// Two sync barriers that are used to synchronize tasks when an
// overflow occurs. The algorithm is the following. All tasks enter
@@ -326,30 +326,30 @@
// ensure, that no task starts doing work before all data
// structures (local and global) have been re-initialized. When they
// exit it, they are free to start working again.
- WorkGangBarrierSync _first_overflow_barrier_sync;
- WorkGangBarrierSync _second_overflow_barrier_sync;
+ WorkGangBarrierSync _first_overflow_barrier_sync;
+ WorkGangBarrierSync _second_overflow_barrier_sync;
// This is set by any task, when an overflow on the global data
// structures is detected
- volatile bool _has_overflown;
+ volatile bool _has_overflown;
// True: marking is concurrent, false: we're in remark
- volatile bool _concurrent;
+ volatile bool _concurrent;
// Set at the end of a Full GC so that marking aborts
- volatile bool _has_aborted;
+ volatile bool _has_aborted;
// Used when remark aborts due to an overflow to indicate that
// another concurrent marking phase should start
- volatile bool _restart_for_overflow;
+ volatile bool _restart_for_overflow;
// This is true from the very start of concurrent marking until the
// point when all the tasks complete their work. It is really used
// to determine the points between the end of concurrent marking and
// time of remark.
- volatile bool _concurrent_marking_in_progress;
+ volatile bool _concurrent_marking_in_progress;
- ConcurrentGCTimer* _gc_timer_cm;
+ ConcurrentGCTimer* _gc_timer_cm;
- G1OldTracer* _gc_tracer_cm;
+ G1OldTracer* _gc_tracer_cm;
// Timing statistics. All of them are in ms
NumberSeq _init_times;
@@ -523,7 +523,7 @@
G1RegionToSpaceMapper* next_bitmap_storage);
~G1ConcurrentMark();
- ConcurrentMarkThread* cm_thread() { return _cm_thread; }
+ G1ConcurrentMarkThread* cm_thread() { return _cm_thread; }
const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp Tue Apr 03 12:05:49 2018 +0200
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "gc/g1/g1Analytics.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMark.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
+#include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1RemSet.hpp"
+#include "gc/g1/vm_operations_g1.hpp"
+#include "gc/shared/concurrentGCPhaseManager.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/debug.hpp"
+
+// ======= Concurrent Mark Thread ========
+
+// Check order in EXPAND_CONCURRENT_PHASES
+STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
+ ConcurrentGCPhaseManager::IDLE_PHASE);
+
+#define EXPAND_CONCURRENT_PHASES(expander) \
+ expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL) \
+ expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL) \
+ expander(CONCURRENT_CYCLE,, "Concurrent Cycle") \
+ expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks") \
+ expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions") \
+ expander(CONCURRENT_MARK,, "Concurrent Mark") \
+ expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots") \
+ expander(BEFORE_REMARK,, NULL) \
+ expander(REMARK,, NULL) \
+ expander(REBUILD_REMEMBERED_SETS,, "Concurrent Rebuild Remembered Sets") \
+ expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark") \
+ /* */
+
+class G1ConcurrentPhase : public AllStatic {
+public:
+ enum {
+#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
+ EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
+#undef CONCURRENT_PHASE_ENUM
+ PHASE_ID_LIMIT
+ };
+};
+
+// The CM thread is created when the G1 garbage collector is used.
+
+G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) :
+ ConcurrentGCThread(),
+ _cm(cm),
+ _state(Idle),
+ _phase_manager_stack(),
+ _vtime_accum(0.0),
+ _vtime_mark_accum(0.0) {
+
+ set_name("G1 Main Marker");
+ create_and_start();
+}
+
+class CMRemark : public VoidClosure {
+ G1ConcurrentMark* _cm;
+public:
+ CMRemark(G1ConcurrentMark* cm) : _cm(cm) {}
+
+ void do_void() {
+ _cm->remark();
+ }
+};
+
+class CMCleanup : public VoidClosure {
+ G1ConcurrentMark* _cm;
+public:
+ CMCleanup(G1ConcurrentMark* cm) : _cm(cm) {}
+
+ void do_void() {
+ _cm->cleanup();
+ }
+};
+
+double G1ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark) {
+ // There are 3 reasons to use SuspendibleThreadSetJoiner.
+ // 1. To avoid concurrency problems.
+ // - G1MMUTracker::add_pause(), when_sec() and its variants (when_ms() etc.) can be called
+ // concurrently from the G1ConcurrentMarkThread and the VMThread.
+ // 2. If a GC is currently running but has not yet updated the MMU,
+ // we will not forget to consider that pause in the MMU calculation.
+ // 3. If a GC is currently running, the G1ConcurrentMarkThread waits for it to finish,
+ // and then sleeps for the amount of time predicted by delay_to_keep_mmu().
+ SuspendibleThreadSetJoiner sts_join;
+
+ const G1Analytics* analytics = g1_policy->analytics();
+ double now = os::elapsedTime();
+ double prediction_ms = remark ? analytics->predict_remark_time_ms()
+ : analytics->predict_cleanup_time_ms();
+ G1MMUTracker* mmu_tracker = g1_policy->mmu_tracker();
+ return mmu_tracker->when_ms(now, prediction_ms);
+}
+
+void G1ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
+ if (g1_policy->adaptive_young_list_length()) {
+ jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
+ if (!_cm->has_aborted() && sleep_time_ms > 0) {
+ os::sleep(this, sleep_time_ms, false);
+ }
+ }
+}
+
+class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
+ G1ConcurrentMark* _cm;
+
+ public:
+ G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
+ GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
+ _cm(cm)
+ {
+ _cm->gc_timer_cm()->register_gc_concurrent_start(title);
+ }
+
+ ~G1ConcPhaseTimer() {
+ _cm->gc_timer_cm()->register_gc_concurrent_end();
+ }
+};
+
+static const char* const concurrent_phase_names[] = {
+#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
+ EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
+#undef CONCURRENT_PHASE_NAME
+ NULL // terminator
+};
+// Verify dense enum assumption. +1 for terminator.
+STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
+ ARRAY_SIZE(concurrent_phase_names));
+
+// Returns the phase number for name, or a negative value if unknown.
+static int lookup_concurrent_phase(const char* name) {
+ const char* const* names = concurrent_phase_names;
+ for (uint i = 0; names[i] != NULL; ++i) {
+ if (strcmp(name, names[i]) == 0) {
+ return static_cast<int>(i);
+ }
+ }
+ return -1;
+}
+
+// The phase must be valid and must have a title.
+static const char* lookup_concurrent_phase_title(int phase) {
+ static const char* const titles[] = {
+#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
+ EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
+#undef CONCURRENT_PHASE_TITLE
+ };
+ // Verify dense enum assumption.
+ STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));
+
+ assert(0 <= phase, "precondition");
+ assert((uint)phase < ARRAY_SIZE(titles), "precondition");
+ const char* title = titles[phase];
+ assert(title != NULL, "precondition");
+ return title;
+}
+
+class G1ConcPhaseManager : public StackObj {
+ G1ConcurrentMark* _cm;
+ ConcurrentGCPhaseManager _manager;
+
+public:
+ G1ConcPhaseManager(int phase, G1ConcurrentMarkThread* thread) :
+ _cm(thread->cm()),
+ _manager(phase, thread->phase_manager_stack())
+ { }
+
+ ~G1ConcPhaseManager() {
+ // Deactivate the manager if marking aborted, to avoid blocking on
+ // phase exit when the phase has been requested.
+ if (_cm->has_aborted()) {
+ _manager.deactivate();
+ }
+ }
+
+ void set_phase(int phase, bool force) {
+ _manager.set_phase(phase, force);
+ }
+};
+
+// Combine phase management and timing into one convenient utility.
+class G1ConcPhase : public StackObj {
+ G1ConcPhaseTimer _timer;
+ G1ConcPhaseManager _manager;
+
+public:
+ G1ConcPhase(int phase, G1ConcurrentMarkThread* thread) :
+ _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
+ _manager(phase, thread)
+ { }
+};
+
+const char* const* G1ConcurrentMarkThread::concurrent_phases() const {
+ return concurrent_phase_names;
+}
+
+bool G1ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
+ int phase = lookup_concurrent_phase(phase_name);
+ if (phase < 0) return false;
+
+ while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
+ phase_manager_stack())) {
+ assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
+ if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
+ // If idle and the goal is !idle, start a collection.
+ G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
+ }
+ }
+ return true;
+}
+
+void G1ConcurrentMarkThread::run_service() {
+ _vtime_start = os::elapsedVTime();
+
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ G1Policy* g1_policy = g1h->g1_policy();
+
+ G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);
+
+ while (!should_terminate()) {
+ // wait until started is set.
+ sleep_before_next_cycle();
+ if (should_terminate()) {
+ break;
+ }
+
+ cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);
+
+ GCIdMark gc_id_mark;
+
+ _cm->concurrent_cycle_start();
+
+ GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
+ {
+ ResourceMark rm;
+ HandleMark hm;
+ double cycle_start = os::elapsedVTime();
+
+ {
+ G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
+ ClassLoaderDataGraph::clear_claimed_marks();
+ }
+
+ // We have to ensure that we finish scanning the root regions
+ // before the next GC takes place. To ensure this we have to
+ // make sure that we do not join the STS until the root regions
+ // have been scanned. If we did then it's possible that a
+ // subsequent GC could block us from joining the STS and proceed
+ // without the root regions having been scanned, which would be a
+ // correctness issue.
+
+ {
+ G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
+ _cm->scan_root_regions();
+ }
+
+ // It would be nice to use the G1ConcPhase class here but
+ // the "end" logging is inside the loop and not at the end of
+ // a scope. Also, the timer doesn't support nesting.
+ // Mimicking the same log output instead.
+ {
+ G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
+ jlong mark_start = os::elapsed_counter();
+ const char* cm_title = lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
+ log_info(gc, marking)("%s (%.3fs)",
+ cm_title,
+ TimeHelper::counter_to_seconds(mark_start));
+ for (uint iter = 1; !_cm->has_aborted(); ++iter) {
+ // Concurrent marking.
+ {
+ G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
+ _cm->mark_from_roots();
+ }
+ if (_cm->has_aborted()) {
+ break;
+ }
+
+ // Provide a control point after mark_from_roots.
+ {
+ G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
+ }
+ if (_cm->has_aborted()) {
+ break;
+ }
+
+ // Delay remark pause for MMU.
+ double mark_end_time = os::elapsedVTime();
+ jlong mark_end = os::elapsed_counter();
+ _vtime_mark_accum += (mark_end_time - cycle_start);
+ delay_to_keep_mmu(g1_policy, true /* remark */);
+ if (_cm->has_aborted()) {
+ break;
+ }
+
+ // Pause Remark.
+ log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
+ cm_title,
+ TimeHelper::counter_to_seconds(mark_start),
+ TimeHelper::counter_to_seconds(mark_end),
+ TimeHelper::counter_to_millis(mark_end - mark_start));
+ mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
+ CMRemark cl(_cm);
+ VM_CGC_Operation op(&cl, "Pause Remark");
+ VMThread::execute(&op);
+ if (_cm->has_aborted()) {
+ break;
+ } else if (!_cm->restart_for_overflow()) {
+ break; // Exit loop if no restart requested.
+ } else {
+ // Loop to restart for overflow.
+ mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
+ log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
+ cm_title, iter);
+ }
+ }
+ }
+
+ if (!_cm->has_aborted()) {
+ G1ConcPhase p(G1ConcurrentPhase::REBUILD_REMEMBERED_SETS, this);
+ _cm->rebuild_rem_set_concurrently();
+ }
+
+ double end_time = os::elapsedVTime();
+ // Update the total virtual time before doing this, since it will try
+ // to measure it to get the vtime for this marking.
+ _vtime_accum = (end_time - _vtime_start);
+
+ if (!_cm->has_aborted()) {
+ delay_to_keep_mmu(g1_policy, false /* cleanup */);
+ }
+
+ if (!_cm->has_aborted()) {
+ CMCleanup cl_cl(_cm);
+ VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
+ VMThread::execute(&op);
+ }
+
+ // We now want to allow clearing of the marking bitmap to be
+ // suspended by a collection pause.
+ // We may have aborted just before the remark. Do not bother clearing the
+ // bitmap then, as it has been done during mark abort.
+ if (!_cm->has_aborted()) {
+ G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
+ _cm->cleanup_for_next_mark();
+ } else {
+ assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
+ }
+ }
+
+ // Update the number of full collections that have been
+ // completed. This will also notify the FullGCCount_lock in case a
+ // Java thread is waiting for a full GC to happen (e.g., it
+ // called System.gc() with +ExplicitGCInvokesConcurrent).
+ {
+ SuspendibleThreadSetJoiner sts_join;
+ g1h->increment_old_marking_cycles_completed(true /* concurrent */);
+
+ _cm->concurrent_cycle_end();
+ }
+
+ cpmanager.set_phase(G1ConcurrentPhase::IDLE, _cm->has_aborted() /* force */);
+ }
+ _cm->root_regions()->cancel_scan();
+}
+
+void G1ConcurrentMarkThread::stop_service() {
+ MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+ CGC_lock->notify_all();
+}
+
+
+void G1ConcurrentMarkThread::sleep_before_next_cycle() {
+ // We join here because we don't want to do the "shouldConcurrentMark()"
+ // below while the world is otherwise stopped.
+ assert(!in_progress(), "should have been cleared");
+
+ MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+ while (!started() && !should_terminate()) {
+ CGC_lock->wait(Mutex::_no_safepoint_check_flag);
+ }
+
+ if (started()) {
+ set_in_progress();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp Tue Apr 03 12:05:49 2018 +0200
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
+#define SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
+
+#include "gc/shared/concurrentGCPhaseManager.hpp"
+#include "gc/shared/concurrentGCThread.hpp"
+
+class G1ConcurrentMark;
+class G1Policy;
+
+// The concurrent mark thread triggers the various steps of the concurrent marking
+// cycle, including the various marking cleanup steps.
+class G1ConcurrentMarkThread: public ConcurrentGCThread {
+ friend class VMStructs;
+
+ double _vtime_start; // Initial virtual time.
+ double _vtime_accum; // Accumulated virtual time.
+ double _vtime_mark_accum;
+
+ G1ConcurrentMark* _cm;
+
+ enum State {
+ Idle,
+ Started,
+ InProgress
+ };
+
+ volatile State _state;
+
+ // WhiteBox testing support.
+ ConcurrentGCPhaseManager::Stack _phase_manager_stack;
+
+ void sleep_before_next_cycle();
+ // Delay marking to meet MMU.
+ void delay_to_keep_mmu(G1Policy* g1_policy, bool remark);
+ double mmu_sleep_time(G1Policy* g1_policy, bool remark);
+
+ void run_service();
+ void stop_service();
+
+ public:
+ // Constructor
+ G1ConcurrentMarkThread(G1ConcurrentMark* cm);
+
+ // Total virtual time so far for this thread and concurrent marking tasks.
+ double vtime_accum();
+ // Marking virtual time so far for this thread and concurrent marking tasks.
+ double vtime_mark_accum();
+
+ G1ConcurrentMark* cm() { return _cm; }
+
+ void set_idle() { assert(_state != Started, "must not be starting a new cycle"); _state = Idle; }
+ bool idle() { return _state == Idle; }
+ void set_started() { assert(_state == Idle, "cycle in progress"); _state = Started; }
+ bool started() { return _state == Started; }
+ void set_in_progress() { assert(_state == Started, "must be starting a cycle"); _state = InProgress; }
+ bool in_progress() { return _state == InProgress; }
+
+ // Returns true from the moment a marking cycle is
+ // initiated (during the initial-mark pause when started() is set)
+ // to the moment when the cycle completes (just after the next
+ // marking bitmap has been cleared and in_progress() is
+ // cleared). While during_cycle() is true we will not start another cycle
+ // so that cycles do not overlap. We cannot use just in_progress()
+ // as the CM thread might take some time to wake up before noticing
+ // that started() is set and setting in_progress().
+ bool during_cycle() { return !idle(); }
+
+ // WhiteBox testing support.
+ const char* const* concurrent_phases() const;
+ bool request_concurrent_phase(const char* phase);
+
+ ConcurrentGCPhaseManager::Stack* phase_manager_stack() {
+ return &_phase_manager_stack;
+ }
+};
+
+#endif // SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp Tue Apr 03 12:05:49 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
+#define SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
+
+#include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
+
+ // Total virtual time so far.
+inline double G1ConcurrentMarkThread::vtime_accum() {
+ return _vtime_accum + _cm->all_task_accum_vtime();
+}
+
+// Marking virtual time so far
+inline double G1ConcurrentMarkThread::vtime_mark_accum() {
+ return _vtime_mark_accum + _cm->all_task_accum_vtime();
+}
+
+#endif // SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp Tue Apr 03 12:05:49 2018 +0200
@@ -23,10 +23,10 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
--- a/src/hotspot/share/gc/g1/g1Policy.cpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp Tue Apr 03 12:05:49 2018 +0200
@@ -23,11 +23,11 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1IHOPControl.hpp"
--- a/src/hotspot/share/gc/g1/vm_operations_g1.cpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.cpp Tue Apr 03 12:05:49 2018 +0200
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/g1/vm_operations_g1.hpp"
--- a/src/hotspot/share/prims/whitebox.cpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp Tue Apr 03 12:05:49 2018 +0200
@@ -73,9 +73,9 @@
#include "prims/cdsoffsets.hpp"
#endif // INCLUDE_CDS
#if INCLUDE_ALL_GCS
-#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
--- a/src/hotspot/share/runtime/thread.cpp Tue Apr 03 13:15:27 2018 +0200
+++ b/src/hotspot/share/runtime/thread.cpp Tue Apr 03 12:05:49 2018 +0200
@@ -114,7 +114,7 @@
#include "utilities/vmError.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/parallel/pcTasks.hpp"
#endif // INCLUDE_ALL_GCS
#if INCLUDE_JVMCI