--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Fri Oct 09 14:08:15 2015 -0400
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Fri Oct 09 20:45:45 2015 +0000
@@ -1654,7 +1654,7 @@
_collectorState = Resetting;
assert(_restart_addr == NULL,
"Should have been NULL'd before baton was passed");
- reset(false /* == !concurrent */);
+ reset_stw();
_cmsGen->reset_after_compaction();
_concurrent_cycles_since_last_unload = 0;
@@ -1934,7 +1934,7 @@
}
case Resetting:
// CMS heap resizing has been completed
- reset(true);
+ reset_concurrent();
assert(_collectorState == Idling, "Collector state should "
"have changed");
@@ -5698,68 +5698,71 @@
// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
-void CMSCollector::reset(bool concurrent) {
- if (concurrent) {
- CMSTokenSyncWithLocks ts(true, bitMapLock());
-
- // If the state is not "Resetting", the foreground thread
- // has done a collection and the resetting.
- if (_collectorState != Resetting) {
- assert(_collectorState == Idling, "The state should only change"
- " because the foreground collector has finished the collection");
- return;
- }
-
- // Clear the mark bitmap (no grey objects to start with)
- // for the next cycle.
- TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
-
- HeapWord* curAddr = _markBitMap.startWord();
- while (curAddr < _markBitMap.endWord()) {
- size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
- MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
- _markBitMap.clear_large_range(chunk);
- if (ConcurrentMarkSweepThread::should_yield() &&
- !foregroundGCIsActive() &&
- CMSYield) {
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
- assert_lock_strong(bitMapLock());
- bitMapLock()->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
- stopTimer();
- if (PrintCMSStatistics != 0) {
- incrementYields();
- }
-
- // See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
- os::sleep(Thread::current(), 1, false);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- bitMapLock()->lock_without_safepoint_check();
- startTimer();
+void CMSCollector::reset_concurrent() {
+ CMSTokenSyncWithLocks ts(true, bitMapLock());
+
+ // If the state is not "Resetting", the foreground thread
+ // has already done a collection, including the resetting.
+ if (_collectorState != Resetting) {
+ assert(_collectorState == Idling, "The state should only change"
+ " because the foreground collector has finished the collection");
+ return;
+ }
+
+ // Clear the mark bitmap (no grey objects to start with)
+ // for the next cycle.
+ TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+ CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
+
+ HeapWord* curAddr = _markBitMap.startWord();
+ while (curAddr < _markBitMap.endWord()) {
+ size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
+ MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
+ _markBitMap.clear_large_range(chunk);
+ if (ConcurrentMarkSweepThread::should_yield() &&
+ !foregroundGCIsActive() &&
+ CMSYield) {
+ assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+ "CMS thread should hold CMS token");
+ assert_lock_strong(bitMapLock());
+ bitMapLock()->unlock();
+ ConcurrentMarkSweepThread::desynchronize(true);
+ stopTimer();
+ if (PrintCMSStatistics != 0) {
+ incrementYields();
}
- curAddr = chunk.end();
- }
- // A successful mostly concurrent collection has been done.
- // Because only the full (i.e., concurrent mode failure) collections
- // are being measured for gc overhead limits, clean the "near" flag
- // and count.
- size_policy()->reset_gc_overhead_limit_count();
- _collectorState = Idling;
- } else {
- // already have the lock
- assert(_collectorState == Resetting, "just checking");
- assert_lock_strong(bitMapLock());
- _markBitMap.clear_all();
- _collectorState = Idling;
- }
-
+
+ // See the comment in coordinator_yield()
+ for (unsigned i = 0; i < CMSYieldSleepCount &&
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive(); ++i) {
+ os::sleep(Thread::current(), 1, false);
+ }
+
+ ConcurrentMarkSweepThread::synchronize(true);
+ bitMapLock()->lock_without_safepoint_check();
+ startTimer();
+ }
+ curAddr = chunk.end();
+ }
+ // A successful mostly concurrent collection has been done.
+ // Because only the full (i.e., concurrent mode failure) collections
+ // are being measured for gc overhead limits, clean the "near" flag
+ // and count.
+ size_policy()->reset_gc_overhead_limit_count();
+ _collectorState = Idling;
+
+ register_gc_end();
+}
+
+// Same as above, but for the STW (foreground) collection path.
+void CMSCollector::reset_stw() {
+ // already have the lock
+ assert(_collectorState == Resetting, "just checking");
+ assert_lock_strong(bitMapLock());
+ GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
+ _markBitMap.clear_all();
+ _collectorState = Idling;
register_gc_end();
}
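
For context: the chunked clearing loop in reset_concurrent() above is the
standard CMS yield pattern: do a bounded unit of work, then check whether a
foreground (STW) collection wants the CMS token. A minimal standalone sketch
of that shape, using illustrative types rather than HotSpot's CMSBitMap and
MemRegion:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Illustrative stand-ins; none of these are real HotSpot APIs.
    struct Bitmap {
      std::vector<bool> bits;
      void clear_range(std::size_t from, std::size_t len) {
        std::fill(bits.begin() + from, bits.begin() + from + len, false);
      }
    };

    static bool should_yield() { return false; }    // e.g. safepoint pending
    static void yield_to_foreground() { /* drop locks, sleep, re-acquire */ }

    // Clear a large bitmap in fixed-size chunks, yielding between chunks so
    // a foreground collection never waits for the whole clear to finish.
    void clear_in_chunks(Bitmap& bm, std::size_t quantum) {
      std::size_t cur = 0;
      while (cur < bm.bits.size()) {
        std::size_t len = std::min(quantum, bm.bits.size() - cur);
        bm.clear_range(cur, len);
        if (should_yield()) {
          yield_to_foreground();
        }
        cur += len;
      }
    }
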
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Fri Oct 09 14:08:15 2015 -0400
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Fri Oct 09 20:45:45 2015 +0000
@@ -799,8 +799,10 @@
// Concurrent sweeping work
void sweepWork(ConcurrentMarkSweepGeneration* old_gen);
- // (Concurrent) resetting of support data structures
- void reset(bool concurrent);
+ // Concurrent resetting of support data structures
+ void reset_concurrent();
+ // Resetting of support data structures from a STW full GC
+ void reset_stw();
// Clear _expansion_cause fields of constituent generations
void clear_expansion_cause();
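
The header change is the classic replacement of a boolean flag parameter with
two named methods: reset(false /* == !concurrent */) needed an inline comment
to be readable, while reset_stw() documents itself and lets each path carry
only the assertions it needs. A minimal sketch of the pattern (illustrative
class, not the real CMSCollector):

    #include <cassert>

    struct Collector {
      bool at_safepoint = false;

      // Concurrent path: coordinates with the foreground collector itself.
      void reset_concurrent() {
        assert(!at_safepoint && "runs on the CMS thread, outside a safepoint");
        // take the CMS token and bitmap lock, clear in chunks, yield...
      }

      // STW path: the world is stopped and the caller already holds the lock.
      void reset_stw() {
        assert(at_safepoint && "runs inside a stop-the-world pause");
        // clear the bitmap in one shot...
      }
    };
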
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp Fri Oct 09 14:08:15 2015 -0400
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp Fri Oct 09 20:45:45 2015 +0000
@@ -109,7 +109,7 @@
break;
}
- GCIdMark gc_id_mark;
+ assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
{
ResourceMark rm;
HandleMark hm;
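
The GCIdMark that used to live here minted a fresh id for every concurrent
phase; after this change the marking thread instead asserts that it is still
running under the id created by the initial mark pause. A minimal model of
the thread-local id and the undefined sentinel being asserted (a sketch: the
real id is stored on NamedThread, not in a plain thread_local):

    #include <cassert>
    #include <cstdint>

    namespace gcid_model {
      const uint32_t undefined_id = UINT32_MAX;
      thread_local uint32_t tl_id = undefined_id;

      uint32_t current() {
        // Mirrors the assert added above: reading the id before the initial
        // mark pause has published it is a bug, not a reason to mint one.
        assert(tl_id != undefined_id && "GC id should have been set");
        return tl_id;
      }

      void set(uint32_t id) { tl_id = id; }
    }
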
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Fri Oct 09 14:08:15 2015 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Fri Oct 09 20:45:45 2015 +0000
@@ -2612,15 +2612,18 @@
}
void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
+ GCIdMarkAndRestore conc_gc_id_mark;
collector_state()->set_concurrent_cycle_started(true);
_gc_timer_cm->register_gc_start(start_time);
_gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
trace_heap_before_gc(_gc_tracer_cm);
+ _cmThread->set_gc_id(GCId::current());
}
void G1CollectedHeap::register_concurrent_cycle_end() {
if (collector_state()->concurrent_cycle_started()) {
+ GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
}
@@ -2643,6 +2646,7 @@
// but before the concurrent cycle end has been registered.
// Make sure that we only send the heap information once.
if (!_heap_summary_sent) {
+ GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
trace_heap_after_gc(_gc_tracer_cm);
_heap_summary_sent = true;
}
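
Taken together, these hunks implement a simple handoff: the pause that starts
the concurrent cycle mints the cycle's id and stores it on the concurrent
mark thread, and the end-of-cycle paths (which may run on other threads)
restore that id around their reporting, so every event of one cycle is logged
under one id. A sketch of the flow with illustrative types:

    #include <cstdint>

    thread_local uint32_t current_gc_id = UINT32_MAX;  // UINT32_MAX ~ undefined
    struct MarkThread { uint32_t gc_id = UINT32_MAX; };

    // register_concurrent_cycle_start: publish the freshly minted id.
    void cycle_start(MarkThread& cm, uint32_t fresh_id) {
      current_gc_id = fresh_id;  // GCIdMarkAndRestore also saves the old id
      cm.gc_id = fresh_id;       // handoff to the concurrent mark thread
    }

    // register_concurrent_cycle_end: re-enter the cycle's id at the end.
    void cycle_end(MarkThread& cm) {
      uint32_t saved = current_gc_id;
      current_gc_id = cm.gc_id;  // restore the id minted at cycle start
      // ... report timings and the heap summary under the cycle's id ...
      current_gc_id = saved;
    }
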
--- a/hotspot/src/share/vm/gc/shared/gcId.cpp Fri Oct 09 14:08:15 2015 -0400
+++ b/hotspot/src/share/vm/gc/shared/gcId.cpp Fri Oct 09 20:45:45 2015 +0000
@@ -60,6 +60,11 @@
}
GCIdMarkAndRestore::GCIdMarkAndRestore() : _gc_id(GCId::create()) {
+ _previous_gc_id = GCId::current(); // will assert that the GC Id is not undefined
+ currentNamedthread()->set_gc_id(_gc_id);
+}
+
+GCIdMarkAndRestore::GCIdMarkAndRestore(uint gc_id) : _gc_id(gc_id) {
_previous_gc_id = GCId::current(); // will assert that the GC Id is not undefined
currentNamedthread()->set_gc_id(_gc_id);
}
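
The destructor is unchanged and so does not appear in the hunk; it presumably
restores _previous_gc_id, which is what makes the new uint constructor safe
to nest inside another GC's scope. A self-contained RAII model of that
behavior (illustrative, not the real class):

    #include <cassert>
    #include <cstdint>

    thread_local uint32_t tl_gc_id = 0;  // stand-in for the NamedThread field

    class IdMarkAndRestore {             // models GCIdMarkAndRestore(uint)
      uint32_t _previous;
    public:
      explicit IdMarkAndRestore(uint32_t id) : _previous(tl_gc_id) {
        tl_gc_id = id;                   // install the given id for this scope
      }
      ~IdMarkAndRestore() {
        tl_gc_id = _previous;            // restore on every exit path
      }
    };

    int main() {
      tl_gc_id = 7;                      // pretend GC 7 is running
      {
        IdMarkAndRestore m(42);          // briefly act on behalf of cycle 42
        assert(tl_gc_id == 42);
      }
      assert(tl_gc_id == 7);             // back to the enclosing GC's id
      return 0;
    }
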
--- a/hotspot/src/share/vm/gc/shared/gcId.hpp Fri Oct 09 14:08:15 2015 -0400
+++ b/hotspot/src/share/vm/gc/shared/gcId.hpp Fri Oct 09 20:45:45 2015 +0000
@@ -55,6 +55,7 @@
uint _previous_gc_id;
public:
GCIdMarkAndRestore();
+ GCIdMarkAndRestore(uint gc_id);
~GCIdMarkAndRestore();
};