author     sangheki
date       Fri, 18 Dec 2015 08:17:30 -0800
changeset  35204:78a0fd90a70f
parent     35202:506ccf1717fd
child      35205:78ef15d884da
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Fri Dec 18 08:17:30 2015 -0800
@@ -2742,9 +2742,11 @@
   _collector->resetYields();
   _collector->resetTimer();
   _collector->startTimer();
+  _collector->gc_timer_cm()->register_gc_concurrent_start(title);
 }
 
 CMSPhaseAccounting::~CMSPhaseAccounting() {
+  _collector->gc_timer_cm()->register_gc_concurrent_end();
   _collector->stopTimer();
   log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_seconds(_collector->timerTicks()));
   log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
@@ -5483,46 +5485,48 @@
     return;
   }
 
-  // Clear the mark bitmap (no grey objects to start with)
-  // for the next cycle.
-  GCTraceCPUTime tcpu;
-  CMSPhaseAccounting cmspa(this, "Concurrent Reset");
-
-  HeapWord* curAddr = _markBitMap.startWord();
-  while (curAddr < _markBitMap.endWord()) {
-    size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
-    MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
-    _markBitMap.clear_large_range(chunk);
-    if (ConcurrentMarkSweepThread::should_yield() &&
-        !foregroundGCIsActive() &&
-        CMSYield) {
-      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-             "CMS thread should hold CMS token");
-      assert_lock_strong(bitMapLock());
-      bitMapLock()->unlock();
-      ConcurrentMarkSweepThread::desynchronize(true);
-      stopTimer();
-      incrementYields();
-
-      // See the comment in coordinator_yield()
-      for (unsigned i = 0; i < CMSYieldSleepCount &&
-                           ConcurrentMarkSweepThread::should_yield() &&
-                           !CMSCollector::foregroundGCIsActive(); ++i) {
-        os::sleep(Thread::current(), 1, false);
+  {
+    // Clear the mark bitmap (no grey objects to start with)
+    // for the next cycle.
+    GCTraceCPUTime tcpu;
+    CMSPhaseAccounting cmspa(this, "Concurrent Reset");
+
+    HeapWord* curAddr = _markBitMap.startWord();
+    while (curAddr < _markBitMap.endWord()) {
+      size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
+      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
+      _markBitMap.clear_large_range(chunk);
+      if (ConcurrentMarkSweepThread::should_yield() &&
+          !foregroundGCIsActive() &&
+          CMSYield) {
+        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+               "CMS thread should hold CMS token");
+        assert_lock_strong(bitMapLock());
+        bitMapLock()->unlock();
+        ConcurrentMarkSweepThread::desynchronize(true);
+        stopTimer();
+        incrementYields();
+
+        // See the comment in coordinator_yield()
+        for (unsigned i = 0; i < CMSYieldSleepCount &&
+                             ConcurrentMarkSweepThread::should_yield() &&
+                             !CMSCollector::foregroundGCIsActive(); ++i) {
+          os::sleep(Thread::current(), 1, false);
+        }
+
+        ConcurrentMarkSweepThread::synchronize(true);
+        bitMapLock()->lock_without_safepoint_check();
+        startTimer();
       }
-
-      ConcurrentMarkSweepThread::synchronize(true);
-      bitMapLock()->lock_without_safepoint_check();
-      startTimer();
+      curAddr = chunk.end();
     }
-    curAddr = chunk.end();
-  }
-  // A successful mostly concurrent collection has been done.
-  // Because only the full (i.e., concurrent mode failure) collections
-  // are being measured for gc overhead limits, clean the "near" flag
-  // and count.
-  size_policy()->reset_gc_overhead_limit_count();
-  _collectorState = Idling;
+    // A successful mostly concurrent collection has been done.
+    // Because only the full (i.e., concurrent mode failure) collections
+    // are being measured for gc overhead limits, clean the "near" flag
+    // and count.
+    size_policy()->reset_gc_overhead_limit_count();
+    _collectorState = Idling;
+  }
 
   register_gc_end();
 }
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Fri Dec 18 08:17:30 2015 -0800
@@ -978,6 +978,8 @@
   bool completed_initialization() { return _completed_initialization; }
 
   void print_eden_and_survivor_chunk_arrays();
+
+  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
 };
 
 class CMSExpansionCause : public AllStatic {
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp Fri Dec 18 08:17:30 2015 -0800
@@ -436,6 +436,7 @@
   _has_aborted(false),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),
+  _concurrent_phase_started(false),
 
   // _verbose_level set below
 
@@ -1003,6 +1004,19 @@
   }
 }
 
+void ConcurrentMark::register_concurrent_phase_start(const char* title) {
+  assert(!_concurrent_phase_started, "Sanity");
+  _concurrent_phase_started = true;
+  _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
+}
+
+void ConcurrentMark::register_concurrent_phase_end() {
+  if (_concurrent_phase_started) {
+    _concurrent_phase_started = false;
+    _g1h->gc_timer_cm()->register_gc_concurrent_end();
+  }
+}
+
 void ConcurrentMark::markFromRoots() {
   // we might be tempted to assert that:
   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
@@ -2609,6 +2623,10 @@
                                  satb_mq_set.is_active() /* expected_active */);
 
   _g1h->trace_heap_after_concurrent_cycle();
+
+  // Close any open concurrent phase timing
+  register_concurrent_phase_end();
+
   _g1h->register_concurrent_cycle_end();
 }
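On the G1 side, register_concurrent_phase_start()/register_concurrent_phase_end() pair the concurrent timer calls with a _concurrent_phase_started guard, so the end call is a no-op when no phase is open. That makes it safe to close the phase both from the RAII helper added in concurrentMarkThread.cpp below and from the cleanup path above ("Close any open concurrent phase timing") without ending the same timer phase twice. The following is a minimal standalone sketch of that guarded pairing; FakeTimer, PhaseTracker and the main() driver are illustrative stand-ins, not HotSpot types.

    #include <cassert>
    #include <cstdio>

    // Stand-in for the real ConcurrentGCTimer: just prints what it records.
    class FakeTimer {
     public:
      void register_gc_concurrent_start(const char* title) { std::printf("start: %s\n", title); }
      void register_gc_concurrent_end()                    { std::printf("end\n"); }
    };

    // Mirrors the guarded start/end pattern from ConcurrentMark above.
    class PhaseTracker {
      FakeTimer* _timer;
      bool _concurrent_phase_started;
     public:
      explicit PhaseTracker(FakeTimer* timer)
        : _timer(timer), _concurrent_phase_started(false) {}

      void register_concurrent_phase_start(const char* title) {
        assert(!_concurrent_phase_started && "a concurrent phase is already open");
        _concurrent_phase_started = true;
        _timer->register_gc_concurrent_start(title);
      }

      // Safe to call even if no phase is open, so both the normal path and an
      // abort/cleanup path may invoke it without double-closing the timer phase.
      void register_concurrent_phase_end() {
        if (_concurrent_phase_started) {
          _concurrent_phase_started = false;
          _timer->register_gc_concurrent_end();
        }
      }
    };

    int main() {
      FakeTimer timer;
      PhaseTracker tracker(&timer);
      tracker.register_concurrent_phase_start("Concurrent Mark");
      tracker.register_concurrent_phase_end();
      tracker.register_concurrent_phase_end();  // no-op: the phase is already closed
      return 0;
    }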
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.hpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.hpp Fri Dec 18 08:17:30 2015 -0800
@@ -353,6 +353,9 @@
   // time of remark.
   volatile bool _concurrent_marking_in_progress;
 
+  // Keep track of whether we have started concurrent phase or not.
+  bool _concurrent_phase_started;
+
   // All of these times are in ms
   NumberSeq _init_times;
   NumberSeq _remark_times;
@@ -516,6 +519,9 @@
     _concurrent_marking_in_progress = false;
   }
 
+  void register_concurrent_phase_start(const char* title);
+  void register_concurrent_phase_end();
+
   void update_accum_task_vtime(int i, double vtime) {
     _accum_task_vtime[i] += vtime;
   }
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp Fri Dec 18 08:17:30 2015 -0800
@@ -90,6 +90,20 @@
     os::sleep(this, sleep_time_ms, false);
   }
 }
+
+class GCConcPhaseTimer : StackObj {
+  ConcurrentMark* _cm;
+
+ public:
+  GCConcPhaseTimer(ConcurrentMark* cm, const char* title) : _cm(cm) {
+    _cm->register_concurrent_phase_start(title);
+  }
+
+  ~GCConcPhaseTimer() {
+    _cm->register_concurrent_phase_end();
+  }
+};
+
 void ConcurrentMarkThread::run() {
   initialize_in_thread();
   wait_for_universe_init();
@@ -127,6 +141,7 @@
       // correctness issue.
 
       if (!cm()->has_aborted()) {
+        GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning");
         _cm->scanRootRegions();
       }
 
@@ -140,6 +155,7 @@
       do {
         iter++;
         if (!cm()->has_aborted()) {
+          GCConcPhaseTimer(_cm, "Concurrent Mark");
          _cm->markFromRoots();
        }
 
@@ -194,6 +210,7 @@
         // reclaimed by cleanup.
 
         GCTraceConcTime(Info, gc) tt("Concurrent Cleanup");
+        GCConcPhaseTimer(_cm, "Concurrent Cleanup");
 
         // Now do the concurrent cleanup operation.
         _cm->completeCleanup();
@@ -250,6 +267,7 @@
     // We may have aborted just before the remark. Do not bother clearing the
     // bitmap then, as it has been done during mark abort.
     if (!cm()->has_aborted()) {
+      GCConcPhaseTimer(_cm, "Concurrent Bitmap Clearing");
       _cm->clearNextBitmap();
     } else {
       assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
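GCConcPhaseTimer is a StackObj-style RAII helper: its constructor opens a concurrent phase on the ConcurrentMark instance and its destructor closes the phase when control leaves the scope. Below is a minimal standalone sketch of the same pattern, assuming stand-in types (FakeMark, ScopedConcPhaseTimer) rather than the HotSpot classes. As with any RAII timer, the object has to be a named local for the phase to span the whole scope; an unnamed temporary is destroyed at the end of its own statement.

    #include <cstdio>

    // Stand-in for ConcurrentMark: only the phase bookkeeping is modelled here.
    class FakeMark {
     public:
      void register_concurrent_phase_start(const char* title) { std::printf("phase start: %s\n", title); }
      void register_concurrent_phase_end()                    { std::printf("phase end\n"); }
    };

    // Same shape as GCConcPhaseTimer above: open on construction, close on destruction.
    class ScopedConcPhaseTimer {
      FakeMark* _cm;
     public:
      ScopedConcPhaseTimer(FakeMark* cm, const char* title) : _cm(cm) {
        _cm->register_concurrent_phase_start(title);
      }
      ~ScopedConcPhaseTimer() {
        _cm->register_concurrent_phase_end();
      }
    };

    int main() {
      FakeMark cm;
      {
        // Named local: the phase stays open until the closing brace.
        ScopedConcPhaseTimer timer(&cm, "Concurrent Mark");
        std::printf("...marking work happens here...\n");
      } // destructor runs here, closing the phase
      return 0;
    }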
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Fri Dec 18 08:17:30 2015 -0800
@@ -2336,7 +2336,7 @@
     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 
     // Clear state variables to prepare for the next concurrent cycle.
-    collector_state()->set_concurrent_cycle_started(false);
+    collector_state()->set_concurrent_cycle_started(false);
     _heap_summary_sent = false;
   }
 }
--- a/hotspot/src/share/vm/gc/shared/gcTimer.cpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/shared/gcTimer.cpp Fri Dec 18 08:17:30 2015 -0800
@@ -69,13 +69,27 @@
 }
 
 void ConcurrentGCTimer::register_gc_pause_start(const char* name) {
+  assert(!_is_concurrent_phase_active, "A pause phase can't be started while a concurrent phase is active.");
   GCTimer::register_gc_pause_start(name);
 }
 
 void ConcurrentGCTimer::register_gc_pause_end() {
+  assert(!_is_concurrent_phase_active, "A pause phase can't be ended while a concurrent phase is active.");
   GCTimer::register_gc_pause_end();
 }
 
+void ConcurrentGCTimer::register_gc_concurrent_start(const char* name, const Ticks& time) {
+  assert(!_is_concurrent_phase_active, "A concurrent phase is already active.");
+  _time_partitions.report_gc_phase_start(name, time, GCPhase::ConcurrentPhaseType);
+  _is_concurrent_phase_active = true;
+}
+
+void ConcurrentGCTimer::register_gc_concurrent_end(const Ticks& time) {
+  assert(_is_concurrent_phase_active, "A concurrent phase is not active.");
+  _time_partitions.report_gc_phase_end(time, GCPhase::ConcurrentPhaseType);
+  _is_concurrent_phase_active = false;
+}
+
 void PhasesStack::clear() {
   _next_phase_level = 0;
 }
@@ -84,7 +98,6 @@
   assert(_next_phase_level < PHASE_LEVELS, "Overflow");
 
   _phase_indices[_next_phase_level] = phase_index;
-
   _next_phase_level++;
 }
 
@@ -92,7 +105,6 @@
   assert(_next_phase_level > 0, "Underflow");
 
   _next_phase_level--;
-
   return _phase_indices[_next_phase_level];
 }
 
@@ -100,9 +112,8 @@
   return _next_phase_level;
 }
 
-
 TimePartitions::TimePartitions() {
-  _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<PausePhase>(INITIAL_CAPACITY, true, mtGC);
+  _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<GCPhase>(INITIAL_CAPACITY, true, mtGC);
   clear();
 }
 
@@ -118,12 +129,13 @@
   _longest_pause = Tickspan();
 }
 
-void TimePartitions::report_gc_phase_start(const char* name, const Ticks& time) {
+void TimePartitions::report_gc_phase_start(const char* name, const Ticks& time, GCPhase::PhaseType type) {
   assert(_phases->length() <= 1000, "Too many recored phases?");
 
   int level = _active_phases.count();
 
-  PausePhase phase;
+  GCPhase phase;
+  phase.set_type(type);
   phase.set_level(level);
   phase.set_name(name);
   phase.set_start(time);
@@ -134,15 +146,14 @@
 }
 
 void TimePartitions::update_statistics(GCPhase* phase) {
-  // FIXME: This should only be done for pause phases
-  if (phase->level() == 0) {
+  if ((phase->type() == GCPhase::PausePhaseType) && (phase->level() == 0)) {
     const Tickspan pause = phase->end() - phase->start();
     _sum_of_pauses += pause;
     _longest_pause = MAX2(pause, _longest_pause);
   }
 }
 
-void TimePartitions::report_gc_phase_end(const Ticks& time) {
+void TimePartitions::report_gc_phase_end(const Ticks& time, GCPhase::PhaseType type) {
   int phase_index = _active_phases.pop();
   GCPhase* phase = _phases->adr_at(phase_index);
   phase->set_end(time);
@@ -187,9 +198,10 @@
     many_sub_pause_phases();
     many_sub_pause_phases2();
     max_nested_pause_phases();
+    one_concurrent();
   }
 
-  static void validate_pause_phase(GCPhase* phase, int level, const char* name, const Ticks& start, const Ticks& end) {
+  static void validate_gc_phase(GCPhase* phase, int level, const char* name, const Ticks& start, const Ticks& end) {
     assert(phase->level() == level, "Incorrect level");
     assert(strcmp(phase->name(), name) == 0, "Incorrect name");
     assert(phase->start() == start, "Incorrect start");
@@ -203,7 +215,7 @@
 
     TimePartitionPhasesIterator iter(&time_partitions);
 
-    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 8);
+    validate_gc_phase(iter.next(), 0, "PausePhase", 2, 8);
     assert(time_partitions.sum_of_pauses() == Ticks(8) - Ticks(2), "Incorrect");
     assert(time_partitions.longest_pause() == Ticks(8) - Ticks(2), "Incorrect");
 
@@ -219,8 +231,8 @@
 
     TimePartitionPhasesIterator iter(&time_partitions);
 
-    validate_pause_phase(iter.next(), 0, "PausePhase1", 2, 3);
-    validate_pause_phase(iter.next(), 0, "PausePhase2", 4, 6);
+    validate_gc_phase(iter.next(), 0, "PausePhase1", 2, 3);
+    validate_gc_phase(iter.next(), 0, "PausePhase2", 4, 6);
     assert(time_partitions.sum_of_pauses() == Ticks(3) - Ticks(0), "Incorrect");
     assert(time_partitions.longest_pause() == Ticks(2) - Ticks(0), "Incorrect");
 
@@ -237,8 +249,8 @@
 
     TimePartitionPhasesIterator iter(&time_partitions);
 
-    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 5);
-    validate_pause_phase(iter.next(), 1, "SubPhase", 3, 4);
+    validate_gc_phase(iter.next(), 0, "PausePhase", 2, 5);
+    validate_gc_phase(iter.next(), 1, "SubPhase", 3, 4);
     assert(time_partitions.sum_of_pauses() == Ticks(3) - Ticks(0), "Incorrect");
     assert(time_partitions.longest_pause() == Ticks(3) - Ticks(0), "Incorrect");
 
@@ -259,10 +271,10 @@
 
     TimePartitionPhasesIterator iter(&time_partitions);
 
-    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 9);
-    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8);
-    validate_pause_phase(iter.next(), 2, "SubPhase2", 4, 7);
-    validate_pause_phase(iter.next(), 3, "SubPhase3", 5, 6);
+    validate_gc_phase(iter.next(), 0, "PausePhase", 2, 9);
+    validate_gc_phase(iter.next(), 1, "SubPhase1", 3, 8);
+    validate_gc_phase(iter.next(), 2, "SubPhase2", 4, 7);
+    validate_gc_phase(iter.next(), 3, "SubPhase3", 5, 6);
     assert(time_partitions.sum_of_pauses() == Ticks(7) - Ticks(0), "Incorrect");
     assert(time_partitions.longest_pause() == Ticks(7) - Ticks(0), "Incorrect");
 
@@ -287,11 +299,11 @@
 
    TimePartitionPhasesIterator iter(&time_partitions);
 
-    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 11);
-    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 4);
-    validate_pause_phase(iter.next(), 1, "SubPhase2", 5, 6);
-    validate_pause_phase(iter.next(), 1, "SubPhase3", 7, 8);
-    validate_pause_phase(iter.next(), 1, "SubPhase4", 9, 10);
+    validate_gc_phase(iter.next(), 0, "PausePhase", 2, 11);
+    validate_gc_phase(iter.next(), 1, "SubPhase1", 3, 4);
+    validate_gc_phase(iter.next(), 1, "SubPhase2", 5, 6);
+    validate_gc_phase(iter.next(), 1, "SubPhase3", 7, 8);
+    validate_gc_phase(iter.next(), 1, "SubPhase4", 9, 10);
     assert(time_partitions.sum_of_pauses() == Ticks(9) - Ticks(0), "Incorrect");
     assert(time_partitions.longest_pause() == Ticks(9) - Ticks(0), "Incorrect");
 
@@ -322,20 +334,35 @@
 
     TimePartitionPhasesIterator iter(&time_partitions);
 
-    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 17);
-    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8);
-    validate_pause_phase(iter.next(), 2, "SubPhase11", 4, 5);
-    validate_pause_phase(iter.next(), 2, "SubPhase12", 6, 7);
-    validate_pause_phase(iter.next(), 1, "SubPhase2", 9, 14);
-    validate_pause_phase(iter.next(), 2, "SubPhase21", 10, 11);
-    validate_pause_phase(iter.next(), 2, "SubPhase22", 12, 13);
-    validate_pause_phase(iter.next(), 1, "SubPhase3", 15, 16);
+    validate_gc_phase(iter.next(), 0, "PausePhase", 2, 17);
+    validate_gc_phase(iter.next(), 1, "SubPhase1", 3, 8);
+    validate_gc_phase(iter.next(), 2, "SubPhase11", 4, 5);
+    validate_gc_phase(iter.next(), 2, "SubPhase12", 6, 7);
+    validate_gc_phase(iter.next(), 1, "SubPhase2", 9, 14);
+    validate_gc_phase(iter.next(), 2, "SubPhase21", 10, 11);
+    validate_gc_phase(iter.next(), 2, "SubPhase22", 12, 13);
+    validate_gc_phase(iter.next(), 1, "SubPhase3", 15, 16);
     assert(time_partitions.sum_of_pauses() == Ticks(15) - Ticks(0), "Incorrect");
     assert(time_partitions.longest_pause() == Ticks(15) - Ticks(0), "Incorrect");
 
     assert(!iter.has_next(), "Too many elements");
   }
+
+  static void one_concurrent() {
+    TimePartitions time_partitions;
+    time_partitions.report_gc_phase_start("ConcurrentPhase", 2, GCPhase::ConcurrentPhaseType);
+    time_partitions.report_gc_phase_end(8, GCPhase::ConcurrentPhaseType);
+
+    TimePartitionPhasesIterator iter(&time_partitions);
+
+    validate_gc_phase(iter.next(), 0, "ConcurrentPhase", 2, 8);
+    // ConcurrentPhaseType should not affect to both 'sum_of_pauses()' and 'longest_pause()'.
+    assert(time_partitions.sum_of_pauses() == Tickspan(), "Incorrect");
+    assert(time_partitions.longest_pause() == Tickspan(), "Incorrect");
+
+    assert(!iter.has_next(), "Too many elements");
+  }
 };
 
 class GCTimerTest {
--- a/hotspot/src/share/vm/gc/shared/gcTimer.hpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/shared/gcTimer.hpp Fri Dec 18 08:17:30 2015 -0800
@@ -39,15 +39,21 @@
 class PhaseVisitor {
  public:
   virtual void visit(GCPhase* phase) = 0;
-  virtual void visit(PausePhase* phase) { visit((GCPhase*)phase); }
-  virtual void visit(ConcurrentPhase* phase) { visit((GCPhase*)phase); }
 };
 
 class GCPhase {
+ public:
+  enum PhaseType {
+    PausePhaseType = 0,
+    ConcurrentPhaseType = 1
+  };
+
+ private:
   const char* _name;
   int _level;
   Ticks _start;
   Ticks _end;
+  PhaseType _type;
 
  public:
   void set_name(const char* name) { _name = name; }
@@ -62,17 +68,9 @@
   const Ticks end() const { return _end; }
   void set_end(const Ticks& time) { _end = time; }
-  virtual void accept(PhaseVisitor* visitor) = 0;
-};
+  PhaseType type() const { return _type; }
+  void set_type(PhaseType type) { _type = type; }
 
-class PausePhase : public GCPhase {
- public:
-  void accept(PhaseVisitor* visitor) {
-    visitor->visit(this);
-  }
-};
-
-class ConcurrentPhase : public GCPhase {
   void accept(PhaseVisitor* visitor) {
     visitor->visit(this);
   }
 };
@@ -80,7 +78,7 @@
 
 class PhasesStack {
  public:
-  // FIXME: Temporary set to 5 (used to be 4), since Reference processing needs it.
+  // Set to 5, since Reference processing needs it.
   static const int PHASE_LEVELS = 5;
 
  private:
@@ -99,8 +97,7 @@
 class TimePartitions {
   static const int INITIAL_CAPACITY = 10;
 
-  // Currently we only support pause phases.
-  GrowableArray<PausePhase>* _phases;
+  GrowableArray<GCPhase>* _phases;
   PhasesStack _active_phases;
 
   Tickspan _sum_of_pauses;
@@ -111,8 +108,8 @@
   ~TimePartitions();
 
   void clear();
-  void report_gc_phase_start(const char* name, const Ticks& time);
-  void report_gc_phase_end(const Ticks& time);
+  void report_gc_phase_start(const char* name, const Ticks& time, GCPhase::PhaseType type=GCPhase::PausePhaseType);
+  void report_gc_phase_end(const Ticks& time, GCPhase::PhaseType type=GCPhase::PausePhaseType);
 
   int num_phases() const;
   GCPhase* phase_at(int index) const;
@@ -121,6 +118,7 @@
   const Tickspan longest_pause() const { return _longest_pause; }
 
   bool has_active_phases();
+
  private:
   void update_statistics(GCPhase* phase);
 };
@@ -162,9 +160,18 @@
 };
 
 class ConcurrentGCTimer : public GCTimer {
+  // ConcurrentGCTimer can't be used if there is an overlap between a pause phase and a concurrent phase.
+  // _is_concurrent_phase_active is used to find above case.
+  bool _is_concurrent_phase_active;
+
 public:
+  ConcurrentGCTimer(): GCTimer(), _is_concurrent_phase_active(false) {};
+
   void register_gc_pause_start(const char* name);
   void register_gc_pause_end();
+
+  void register_gc_concurrent_start(const char* name, const Ticks& time = Ticks::now());
+  void register_gc_concurrent_end(const Ticks& time = Ticks::now());
 };
 
 class TimePartitionPhasesIterator {
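The comment and assertions added around ConcurrentGCTimer spell out its contract: at most one concurrent phase is open at a time, and pause phases must not be registered while a concurrent phase is active. The sketch below models only that state machine with a simplified stand-in (ToyConcurrentTimer); it does not reproduce the real TimePartitions bookkeeping. A sequence that starts a pause inside an open concurrent phase would trip the corresponding assert, which is exactly the misuse the new _is_concurrent_phase_active flag is meant to catch.

    #include <cassert>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Simplified stand-in for ConcurrentGCTimer: records phase names and enforces
    // the same rule as the asserts in gcTimer.cpp -- pause phases and the single
    // concurrent phase must not overlap.
    class ToyConcurrentTimer {
      bool _is_concurrent_phase_active = false;
      std::vector<std::string> _log;
     public:
      void register_gc_pause_start(const char* name) {
        assert(!_is_concurrent_phase_active && "pause cannot start inside a concurrent phase");
        _log.push_back(std::string("pause start: ") + name);
      }
      void register_gc_pause_end() {
        assert(!_is_concurrent_phase_active && "pause cannot end inside a concurrent phase");
        _log.push_back("pause end");
      }
      void register_gc_concurrent_start(const char* name) {
        assert(!_is_concurrent_phase_active && "a concurrent phase is already active");
        _is_concurrent_phase_active = true;
        _log.push_back(std::string("concurrent start: ") + name);
      }
      void register_gc_concurrent_end() {
        assert(_is_concurrent_phase_active && "no concurrent phase is active");
        _is_concurrent_phase_active = false;
        _log.push_back("concurrent end");
      }
      void dump() const {
        for (const std::string& line : _log) std::printf("%s\n", line.c_str());
      }
    };

    int main() {
      ToyConcurrentTimer timer;
      // Legal sequence: the concurrent phase is fully closed before the pause begins.
      timer.register_gc_concurrent_start("Concurrent Mark");
      timer.register_gc_concurrent_end();
      timer.register_gc_pause_start("Remark");
      timer.register_gc_pause_end();
      timer.dump();
      return 0;
    }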
--- a/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp Fri Dec 18 08:17:30 2015 -0800
@@ -418,30 +418,46 @@
 }
 
 class PhaseSender : public PhaseVisitor {
+  void visit_pause(GCPhase* phase) {
+    assert(phase->level() < PhasesStack::PHASE_LEVELS, "Need more event types for PausePhase");
+
+    switch (phase->level()) {
+      case 0: send_phase<EventGCPhasePause>(phase); break;
+      case 1: send_phase<EventGCPhasePauseLevel1>(phase); break;
+      case 2: send_phase<EventGCPhasePauseLevel2>(phase); break;
+      case 3: send_phase<EventGCPhasePauseLevel3>(phase); break;
+      default: /* Ignore sending this phase */ break;
+    }
+  }
+
+  void visit_concurrent(GCPhase* phase) {
+    assert(phase->level() < 1, "There is only one level for ConcurrentPhase");
+
+    switch (phase->level()) {
+      case 0: send_phase<EventGCPhaseConcurrent>(phase); break;
+      default: /* Ignore sending this phase */ break;
+    }
+  }
+
  public:
   template<typename T>
-  void send_phase(PausePhase* pause) {
+  void send_phase(GCPhase* phase) {
     T event(UNTIMED);
     if (event.should_commit()) {
       event.set_gcId(GCId::current());
-      event.set_name(pause->name());
-      event.set_starttime(pause->start());
-      event.set_endtime(pause->end());
+      event.set_name(phase->name());
+      event.set_starttime(phase->start());
+      event.set_endtime(phase->end());
       event.commit();
     }
   }
 
-  void visit(GCPhase* pause) { ShouldNotReachHere(); }
-  void visit(ConcurrentPhase* pause) { Unimplemented(); }
-  void visit(PausePhase* pause) {
-    assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types");
-
-    switch (pause->level()) {
-      case 0: send_phase<EventGCPhasePause>(pause); break;
-      case 1: send_phase<EventGCPhasePauseLevel1>(pause); break;
-      case 2: send_phase<EventGCPhasePauseLevel2>(pause); break;
-      case 3: send_phase<EventGCPhasePauseLevel3>(pause); break;
-      default: /* Ignore sending this phase */ break;
+  void visit(GCPhase* phase) {
+    if (phase->type() == GCPhase::PausePhaseType) {
+      visit_pause(phase);
+    } else {
+      assert(phase->type() == GCPhase::ConcurrentPhaseType, "Should be ConcurrentPhaseType");
+      visit_concurrent(phase);
     }
   }
 };
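With PausePhase and ConcurrentPhase folded into a single GCPhase carrying a PhaseType tag, PhaseSender now branches on that tag inside one visit() instead of providing one visitor overload per subclass. A minimal standalone sketch of that shape follows; Phase, PhasePrinter and the printed "events" are illustrative stand-ins for GCPhase, PhaseSender and the JFR event types, not HotSpot code.

    #include <cassert>
    #include <cstdio>

    // Illustrative stand-in for GCPhase: one concrete class plus a type tag,
    // instead of a PausePhase/ConcurrentPhase class hierarchy.
    struct Phase {
      enum Type { PauseType, ConcurrentType };
      Type type;
      int level;
      const char* name;
    };

    // Stand-in for PhaseSender: a single visit() that dispatches on the tag.
    class PhasePrinter {
      void visit_pause(const Phase& phase) {
        std::printf("pause event (level %d): %s\n", phase.level, phase.name);
      }
      void visit_concurrent(const Phase& phase) {
        assert(phase.level < 1 && "only one level for concurrent phases");
        std::printf("concurrent event: %s\n", phase.name);
      }
     public:
      void visit(const Phase& phase) {
        if (phase.type == Phase::PauseType) {
          visit_pause(phase);
        } else {
          assert(phase.type == Phase::ConcurrentType);
          visit_concurrent(phase);
        }
      }
    };

    int main() {
      PhasePrinter printer;
      Phase pause      = { Phase::PauseType,      0, "Pause Remark" };
      Phase concurrent = { Phase::ConcurrentType, 0, "Concurrent Mark" };
      printer.visit(pause);
      printer.visit(concurrent);
      return 0;
    }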
--- a/hotspot/src/share/vm/trace/trace.xml Mon Dec 21 14:09:21 2015 +0000
+++ b/hotspot/src/share/vm/trace/trace.xml Fri Dec 18 08:17:30 2015 -0800
@@ -460,6 +460,11 @@
       <value type="UTF8" field="name" label="Name" />
     </event>
 
+    <event id="GCPhaseConcurrent" path="vm/gc/phases/concurrent" label="GC Phase Concurrent">
+      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="UTF8" field="name" label="Name" />
+    </event>
+
     <!-- Compiler events -->
 
     <event id="Compilation" path="vm/compiler/compilation" label="Compilation"