8134953: Make the GC ID available in a central place
author brutisso
date Wed, 30 Sep 2015 09:07:21 +0200
changeset 33107 77bf0d2069a3
parent 33106 20c533b9e167
child 33108 6714a3872d8f
8134953: Make the GC ID available in a central place
Reviewed-by: pliden, jmasa
hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp
hotspot/src/share/vm/gc/cms/parNewGeneration.cpp
hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp
hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp
hotspot/src/share/vm/gc/cms/yieldingWorkgroup.cpp
hotspot/src/share/vm/gc/g1/concurrentMark.cpp
hotspot/src/share/vm/gc/g1/concurrentMark.hpp
hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp
hotspot/src/share/vm/gc/g1/g1MMUTracker.cpp
hotspot/src/share/vm/gc/g1/g1MMUTracker.hpp
hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp
hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp
hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp
hotspot/src/share/vm/gc/parallel/pcTasks.cpp
hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp
hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp
hotspot/src/share/vm/gc/parallel/psScavenge.cpp
hotspot/src/share/vm/gc/serial/defNewGeneration.cpp
hotspot/src/share/vm/gc/serial/genMarkSweep.cpp
hotspot/src/share/vm/gc/shared/collectedHeap.cpp
hotspot/src/share/vm/gc/shared/gcId.cpp
hotspot/src/share/vm/gc/shared/gcId.hpp
hotspot/src/share/vm/gc/shared/gcTrace.cpp
hotspot/src/share/vm/gc/shared/gcTrace.hpp
hotspot/src/share/vm/gc/shared/gcTraceSend.cpp
hotspot/src/share/vm/gc/shared/gcTraceTime.cpp
hotspot/src/share/vm/gc/shared/gcTraceTime.hpp
hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp
hotspot/src/share/vm/gc/shared/objectCountEventSender.cpp
hotspot/src/share/vm/gc/shared/objectCountEventSender.hpp
hotspot/src/share/vm/gc/shared/referenceProcessor.cpp
hotspot/src/share/vm/gc/shared/referenceProcessor.hpp
hotspot/src/share/vm/gc/shared/workgroup.cpp
hotspot/src/share/vm/gc/shared/workgroup.hpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
hotspot/src/share/vm/utilities/ostream.cpp
hotspot/src/share/vm/utilities/ostream.hpp
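
In outline (a simplified sketch, not part of the patch): instead of creating a GCId value object in the tracer and threading it through GCTraceTime, reference processing and logging calls, the change stores the current GC id on the NamedThread and scopes it with a new GCIdMark StackObj. Only GCIdMark, GCId::current() and GCId::undefined() below are names introduced by this changeset; the surrounding function is hypothetical and shown only to illustrate the usage pattern that the hunks below add at the start of each collection.

    #include "gc/shared/gcId.hpp"

    void hypothetical_collection_cycle() {
      GCIdMark gc_id_mark;            // allocates the next id and binds it to this NamedThread
      // ... GCTraceTime, gclog_stamp() and reference processing now pick up
      //     the id from the current thread instead of taking a GCId argument ...
      uint id = GCId::current();      // asserts that an id is active on this thread
      (void)id;
    }                                 // ~GCIdMark() resets the thread's id to GCId::undefined()

The concurrent collectors (the CMS background thread and the G1 concurrent mark thread) place the GCIdMark around a whole cycle, while the stop-the-world collectors place it around a single collection, as the hunks below show.
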
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -1593,7 +1593,7 @@
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
-  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
+  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
@@ -2825,7 +2825,6 @@
  public:
   CMSPhaseAccounting(CMSCollector *collector,
                      const char *phase,
-                     const GCId gc_id,
                      bool print_cr = true);
   ~CMSPhaseAccounting();
 
@@ -2834,7 +2833,6 @@
   const char *_phase;
   elapsedTimer _wallclock;
   bool _print_cr;
-  const GCId _gc_id;
 
  public:
   // Not MT-safe; so do not pass around these StackObj's
@@ -2850,15 +2848,14 @@
 
 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                        const char *phase,
-                                       const GCId gc_id,
                                        bool print_cr) :
-  _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
+  _collector(collector), _phase(phase), _print_cr(print_cr) {
 
   if (PrintCMSStatistics != 0) {
     _collector->resetYields();
   }
   if (PrintGCDetails) {
-    gclog_or_tty->gclog_stamp(_gc_id);
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
       _collector->cmsGen()->short_name(), _phase);
   }
@@ -2872,7 +2869,7 @@
   _collector->stopTimer();
   _wallclock.stop();
   if (PrintGCDetails) {
-    gclog_or_tty->gclog_stamp(_gc_id);
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
                  _collector->cmsGen()->short_name(),
                  _phase, _collector->timerValue(), _wallclock.seconds());
@@ -2951,7 +2948,7 @@
   setup_cms_unloading_and_verification_state();
 
   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
-    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+    PrintGCDetails && Verbose, true, _gc_timer_cm);)
 
   // Reset all the PLAB chunk arrays if necessary.
   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@@ -3054,7 +3051,7 @@
 
   CMSTokenSyncWithLocks ts(true, bitMapLock());
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+  CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
   bool res = markFromRootsWork();
   if (res) {
     _collectorState = Precleaning;
@@ -3751,7 +3748,7 @@
       _start_sampling = false;
     }
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
   }
   CMSTokenSync x(true); // is cms thread
@@ -3780,7 +3777,7 @@
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
     // We need more smarts in the abortable preclean
     // loop below to deal with cases where allocation
     // in young gen is very very slow, and our precleaning
@@ -3925,7 +3922,7 @@
     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
     rp->preclean_discovered_references(
           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
-          gc_timer, _gc_tracer_cm->gc_id());
+          gc_timer);
   }
 
   if (clean_survivor) {  // preclean the active survivor space(s)
@@ -4261,7 +4258,7 @@
       // expect it to be false and set to true
       FlagSetting fl(gch->_is_gc_active, false);
       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
-        PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+        PrintGCDetails && Verbose, true, _gc_timer_cm);)
       gch->do_collection(true,                      // full (i.e. force, see below)
                          false,                     // !clear_all_soft_refs
                          0,                         // size
@@ -4279,7 +4276,7 @@
 }
 
 void CMSCollector::checkpointRootsFinalWork() {
-  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
 
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
@@ -4329,11 +4326,10 @@
     // the most recent young generation GC, minus those cleaned up by the
     // concurrent precleaning.
     if (CMSParallelRemarkEnabled) {
-      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
       do_remark_parallel();
     } else {
-      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
-                  _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);
       do_remark_non_parallel();
     }
   }
@@ -4341,7 +4337,7 @@
   verify_overflow_empty();
 
   {
-    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
     refProcessingWork();
   }
   verify_work_stacks_empty();
@@ -5116,7 +5112,7 @@
                               NULL,  // space is set further below
                               &_markBitMap, &_markStack, &mrias_cl);
   {
-    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
     // Iterate over the dirty cards, setting the corresponding bits in the
     // mod union table.
     {
@@ -5153,7 +5149,7 @@
     Universe::verify();
   }
   {
-    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
 
     verify_work_stacks_empty();
 
@@ -5175,7 +5171,7 @@
   }
 
   {
-    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
 
     verify_work_stacks_empty();
 
@@ -5194,7 +5190,7 @@
   }
 
   {
-    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
 
     verify_work_stacks_empty();
 
@@ -5403,7 +5399,7 @@
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
   {
-    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
 
     ReferenceProcessorStats stats;
     if (rp->processing_is_mt()) {
@@ -5428,15 +5424,13 @@
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         &task_executor,
-                                        _gc_timer_cm,
-                                        _gc_tracer_cm->gc_id());
+                                        _gc_timer_cm);
     } else {
       stats = rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         NULL,
-                                        _gc_timer_cm,
-                                        _gc_tracer_cm->gc_id());
+                                        _gc_timer_cm);
     }
     _gc_tracer_cm->report_gc_reference_stats(stats);
 
@@ -5447,7 +5441,7 @@
 
   if (should_unload_classes()) {
     {
-      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
 
       // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@@ -5460,13 +5454,13 @@
     }
 
     {
-      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
       // Clean up unreferenced symbols in symbol table.
       SymbolTable::unlink();
     }
 
     {
-      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
       // Delete entries for dead interned strings.
       StringTable::unlink(&_is_alive_closure);
     }
@@ -5534,7 +5528,7 @@
   _intra_sweep_timer.start();
   {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
@@ -5719,7 +5713,7 @@
     // Clear the mark bitmap (no grey objects to start with)
     // for the next cycle.
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
 
     HeapWord* curAddr = _markBitMap.startWord();
     while (curAddr < _markBitMap.endWord()) {
@@ -5771,7 +5765,7 @@
 
 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
+  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
   TraceCollectorStats tcs(counters());
 
   switch (op) {
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -26,6 +26,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -124,6 +125,7 @@
   while (!_should_terminate) {
     sleepBeforeNextCycle();
     if (_should_terminate) break;
+    GCIdMark gc_id_mark;
     GCCause::Cause cause = _collector->_full_gc_requested ?
       _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
     _collector->collect_in_background(cause);
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -896,7 +896,7 @@
     size_policy->minor_collection_begin();
   }
 
-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
 
@@ -959,13 +959,13 @@
     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, &task_executor,
-                                              _gc_timer, _gc_tracer.gc_id());
+                                              _gc_timer);
   } else {
     thread_state_set.flush();
     gch->save_marks();
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, NULL,
-                                              _gc_timer, _gc_tracer.gc_id());
+                                              _gc_timer);
   }
   _gc_tracer.report_gc_reference_stats(stats);
   if (!promotion_failed()) {
--- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -58,7 +58,7 @@
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
+    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -70,7 +70,7 @@
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
+    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -134,6 +134,7 @@
     return;
   }
   HS_PRIVATE_CMS_INITMARK_BEGIN();
+  GCIdMark gc_id_mark(_gc_id);
 
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
 
@@ -161,6 +162,7 @@
     return;
   }
   HS_PRIVATE_CMS_REMARK_BEGIN();
+  GCIdMark gc_id_mark(_gc_id);
 
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
 
--- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "runtime/vm_operations.hpp"
 
@@ -53,6 +54,7 @@
  protected:
   CMSCollector*  _collector;                 // associated collector
   bool           _prologue_succeeded;     // whether doit_prologue succeeded
+  uint           _gc_id;
 
   bool lost_race() const;
 
@@ -63,7 +65,8 @@
  public:
   VM_CMS_Operation(CMSCollector* collector):
     _collector(collector),
-    _prologue_succeeded(false) {}
+    _prologue_succeeded(false),
+    _gc_id(GCId::current()) {}
   ~VM_CMS_Operation() {}
 
   // The legal collector state for executing this CMS op.
--- a/hotspot/src/share/vm/gc/cms/yieldingWorkgroup.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/yieldingWorkgroup.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/cms/yieldingWorkgroup.hpp"
+#include "gc/shared/gcId.hpp"
 #include "utilities/macros.hpp"
 
 YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id)
@@ -340,6 +341,7 @@
         // Now, release the gang mutex and do the work.
         {
           MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
+          GCIdMark gc_id_mark(data.task()->gc_id());
           data.task()->work(id);   // This might include yielding
         }
         // Reacquire monitor and note completion of this worker
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -41,6 +41,7 @@
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.hpp"
@@ -520,7 +521,6 @@
   _has_overflown(false),
   _concurrent(false),
   _has_aborted(false),
-  _aborted_gc_id(GCId::undefined()),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),
 
@@ -991,7 +991,7 @@
       force_overflow()->update();
 
       if (G1Log::fine()) {
-        gclog_or_tty->gclog_stamp(concurrent_gc_id());
+        gclog_or_tty->gclog_stamp();
         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
       }
     }
@@ -1181,7 +1181,7 @@
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
     if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->gclog_stamp();
       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
     }
 
@@ -1195,7 +1195,7 @@
     _parallel_workers->run_task(&task);
 
     if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->gclog_stamp();
       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
     }
 
@@ -1246,8 +1246,7 @@
 
  public:
   G1CMTraceTime(const char* title, bool doit)
-    : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
-        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+    : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
   }
 };
 
@@ -2392,8 +2391,7 @@
                                           &g1_keep_alive,
                                           &g1_drain_mark_stack,
                                           executor,
-                                          g1h->gc_timer_cm(),
-                                          concurrent_gc_id());
+                                          g1h->gc_timer_cm());
     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
 
     // The do_oop work routines of the keep_alive and drain_marking_stack
@@ -2989,8 +2987,6 @@
   }
   _first_overflow_barrier_sync.abort();
   _second_overflow_barrier_sync.abort();
-  _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
-  assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
   _has_aborted = true;
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -3005,13 +3001,6 @@
   _g1h->register_concurrent_cycle_end();
 }
 
-const GCId& ConcurrentMark::concurrent_gc_id() {
-  if (has_aborted()) {
-    return _aborted_gc_id;
-  }
-  return _g1h->gc_tracer_cm()->gc_id();
-}
-
 static void print_ms_time_info(const char* prefix, const char* name,
                                NumberSeq& ns) {
   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -28,7 +28,6 @@
 #include "classfile/javaClasses.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/heapRegionSet.hpp"
-#include "gc/shared/gcId.hpp"
 #include "gc/shared/taskqueue.hpp"
 
 class G1CollectedHeap;
@@ -425,7 +424,6 @@
   volatile bool           _concurrent;
   // Set at the end of a Full GC so that marking aborts
   volatile bool           _has_aborted;
-  GCId                    _aborted_gc_id;
 
   // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
@@ -768,8 +766,6 @@
 
   bool has_aborted()      { return _has_aborted; }
 
-  const GCId& concurrent_gc_id();
-
   // This prints the global/local fingers. It is used for debugging.
   NOT_PRODUCT(void print_finger();)
 
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/vmThread.hpp"
@@ -85,7 +86,7 @@
     SuspendibleThreadSetJoiner sts_joiner(join_sts);
     va_list args;
     va_start(args, fmt);
-    gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->vprint_cr(fmt, args);
     va_end(args);
   }
@@ -108,6 +109,7 @@
       break;
     }
 
+    GCIdMark gc_id_mark;
     {
       ResourceMark rm;
       HandleMark   hm;
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -53,6 +53,7 @@
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -1450,6 +1451,7 @@
   gc_timer->register_gc_start();
 
   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
+  GCIdMark gc_id_mark;
   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
 
   SvcGCMarker sgcm(SvcGCMarker::FULL);
@@ -1476,7 +1478,7 @@
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
     {
-      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
+      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
       TraceCollectorStats tcs(g1mm()->full_collection_counters());
       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
 
@@ -3894,7 +3896,7 @@
     return;
   }
 
-  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
+  gclog_or_tty->gclog_stamp();
 
   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
@@ -3952,6 +3954,7 @@
 
   _gc_timer_stw->register_gc_start();
 
+  GCIdMark gc_id_mark;
   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
 
   SvcGCMarker sgcm(SvcGCMarker::MINOR);
@@ -5501,8 +5504,7 @@
                                               &keep_alive,
                                               &drain_queue,
                                               NULL,
-                                              _gc_timer_stw,
-                                              _gc_tracer_stw->gc_id());
+                                              _gc_timer_stw);
   } else {
     // Parallel reference processing
     assert(rp->num_q() == no_of_gc_workers, "sanity");
@@ -5513,8 +5515,7 @@
                                               &keep_alive,
                                               &drain_queue,
                                               &par_task_executor,
-                                              _gc_timer_stw,
-                                              _gc_tracer_stw->gc_id());
+                                              _gc_timer_stw);
   }
 
   _gc_tracer_stw->report_gc_reference_stats(stats);
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -857,7 +857,7 @@
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
 
-  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, _g1->gc_tracer_cm()->gc_id());
+  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
@@ -952,8 +952,7 @@
     collector_state()->set_initiate_conc_mark_if_possible(true);
   }
 
-  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
-                          end_time_sec, _g1->gc_tracer_stw()->gc_id());
+  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
 
   if (update_stats) {
     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
@@ -1584,7 +1583,7 @@
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
-  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, _g1->gc_tracer_cm()->gc_id());
+  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
 }
 
 // Add the heap region at the head of the non-incremental collection set
--- a/hotspot/src/share/vm/gc/g1/g1MMUTracker.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1MMUTracker.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -76,7 +76,7 @@
   return gc_time;
 }
 
-void G1MMUTrackerQueue::add_pause(double start, double end, const GCId& gcId) {
+void G1MMUTrackerQueue::add_pause(double start, double end) {
   double duration = end - start;
 
   remove_expired_entries(end);
@@ -106,7 +106,7 @@
 
   // Current entry needs to be added before calculating the value
   double slice_time = calculate_gc_time(end);
-  G1MMUTracer::report_mmu(gcId, _time_slice, slice_time, _max_gc_time);
+  G1MMUTracer::report_mmu(_time_slice, slice_time, _max_gc_time);
 }
 
 // basically the _internal call does not remove expired entries
--- a/hotspot/src/share/vm/gc/g1/g1MMUTracker.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1MMUTracker.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -43,7 +43,7 @@
 public:
   G1MMUTracker(double time_slice, double max_gc_time);
 
-  virtual void add_pause(double start, double end, const GCId& gcId) = 0;
+  virtual void add_pause(double start, double end) = 0;
   virtual double when_sec(double current_time, double pause_time) = 0;
 
   double max_gc_time() {
@@ -127,7 +127,7 @@
 public:
   G1MMUTrackerQueue(double time_slice, double max_gc_time);
 
-  virtual void add_pause(double start, double end, const GCId& gcId);
+  virtual void add_pause(double start, double end);
 
   virtual double when_sec(double current_time, double pause_time);
 };
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -121,7 +121,7 @@
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                     bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
@@ -146,8 +146,7 @@
                                       &GenMarkSweep::keep_alive,
                                       &GenMarkSweep::follow_stack_closure,
                                       NULL,
-                                      gc_timer(),
-                                      gc_tracer()->gc_id());
+                                      gc_timer());
   gc_tracer()->report_gc_reference_stats(stats);
 
 
@@ -200,7 +199,7 @@
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
 
-  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
 
   prepare_compaction();
 }
@@ -233,7 +232,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
 
   // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -294,7 +293,7 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
 
   G1SpaceCompactClosure blk;
   g1h->heap_region_iterate(&blk);
--- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -26,6 +26,7 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/g1/g1Log.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcTimer.hpp"
@@ -227,7 +228,8 @@
 void VM_CGC_Operation::doit() {
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
+  GCIdMark gc_id_mark(_gc_id);
+  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm());
   IsGCActiveMark x;
   _cl->do_void();
 }
--- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP
 
 #include "gc/g1/g1AllocationContext.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 
 // VM_operations for the G1 collector.
@@ -104,6 +105,7 @@
   VoidClosure* _cl;
   const char* _printGCMessage;
   bool _needs_pll;
+  uint _gc_id;
 
 protected:
   // java.lang.ref.Reference support
@@ -112,7 +114,7 @@
 
 public:
   VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll)
-    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { }
+    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll), _gc_id(GCId::current()) { }
   virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   virtual void doit();
   virtual bool doit_prologue();
--- a/hotspot/src/share/vm/gc/parallel/pcTasks.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/parallel/pcTasks.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -53,7 +53,7 @@
   ResourceMark rm;
 
   NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
 
@@ -82,7 +82,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
@@ -153,7 +153,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("RefProcTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
@@ -209,7 +209,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -241,7 +241,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -308,7 +308,7 @@
 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
 
   NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -323,7 +323,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
--- a/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -36,6 +36,7 @@
 #include "gc/serial/markSweep.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -113,6 +114,7 @@
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
 
+  GCIdMark gc_id_mark;
   _gc_timer->register_gc_start();
   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 
@@ -165,7 +167,7 @@
     HandleMark hm;
 
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -508,7 +510,7 @@
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
@@ -541,7 +543,7 @@
     ref_processor()->setup_policy(clear_all_softrefs);
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
-        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
+        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
     gc_tracer()->report_gc_reference_stats(stats);
   }
 
@@ -567,7 +569,7 @@
 
 
 void PSMarkSweep::mark_sweep_phase2() {
-  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
 
   // Now all live objects are marked, compute the new object addresses.
 
@@ -594,7 +596,7 @@
 
 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
@@ -634,7 +636,7 @@
 
 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
-  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
 
   // All pointers are now adjusted, move objects accordingly
 
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -40,6 +40,7 @@
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -960,7 +961,7 @@
   // at each young gen gc.  Do the update unconditionally (even though a
   // promotion failure does not swap spaces) because an unknown number of young
   // collections will have swapped the spaces an unknown number of times.
-  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
@@ -1003,7 +1004,7 @@
 
 void PSParallelCompact::post_compact()
 {
-  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
 
   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     // Clear the marking bitmap, summary data and split info.
@@ -1824,7 +1825,7 @@
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                       bool maximum_compaction)
 {
-  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
   // trace("2");
 
 #ifdef  ASSERT
@@ -1984,6 +1985,7 @@
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
+  GCIdMark gc_id_mark;
   _gc_timer.register_gc_start();
   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 
@@ -2031,7 +2033,7 @@
     gc_task_manager()->task_idle_workers();
 
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -2331,7 +2333,7 @@
                                       bool maximum_heap_compaction,
                                       ParallelOldTracer *gc_tracer) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2346,7 +2348,7 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   {
-    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
 
     ParallelScavengeHeap::ParStrongRootsScope psrs;
 
@@ -2375,24 +2377,24 @@
 
   // Process reference objects found during marking
   {
-    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
 
     ReferenceProcessorStats stats;
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       stats = ref_processor()->process_discovered_references(
         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
-        &task_executor, &_gc_timer, _gc_tracer.gc_id());
+        &task_executor, &_gc_timer);
     } else {
       stats = ref_processor()->process_discovered_references(
         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
-        &_gc_timer, _gc_tracer.gc_id());
+        &_gc_timer);
     }
 
     gc_tracer->report_gc_reference_stats(stats);
   }
 
-  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
 
   // This is the point where the entire marking should have completed.
   assert(cm->marking_stacks_empty(), "Marking should have completed");
@@ -2423,7 +2425,7 @@
 
 void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
 
   // Need new claim bits when tracing through and adjusting pointers.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2459,7 +2461,7 @@
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
-  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
 
   // Find the threads that are active
   unsigned int which = 0;
@@ -2533,7 +2535,7 @@
 
 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
-  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
 
   ParallelCompactData& sd = PSParallelCompact::summary_data();
 
@@ -2615,7 +2617,7 @@
                                      GCTaskQueue* q,
                                      ParallelTaskTerminator* terminator_ptr,
                                      uint parallel_gc_threads) {
-  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
 
   // Once a thread has drained it's stack, it should try to steal regions from
   // other threads.
@@ -2663,7 +2665,7 @@
 
 void PSParallelCompact::compact() {
   // trace("5");
-  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
@@ -2679,7 +2681,7 @@
   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
 
   {
-    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
 
     gc_task_manager()->execute_and_wait(q);
 
@@ -2693,7 +2695,7 @@
 
   {
     // Update the deferred objects, if any.  Any compaction manager can be used.
-    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
       update_deferred_objects(cm, SpaceId(id));
--- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -36,6 +36,7 @@
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -278,6 +279,7 @@
     return false;
   }
 
+  GCIdMark gc_id_mark;
   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 
   bool promotion_failure_occurred = false;
@@ -322,7 +324,7 @@
     HandleMark hm;
 
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
+    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
 
@@ -387,7 +389,7 @@
     // We'll use the promotion manager again later.
     PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
     {
-      GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
+      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
       ParallelScavengeHeap::ParStrongRootsScope psrs;
 
       GCTaskQueue* q = GCTaskQueue::create();
@@ -429,7 +431,7 @@
 
     // Process reference objects discovered during scavenge
     {
-      GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());
+      GCTraceTime tm("References", false, false, &_gc_timer);
 
       reference_processor()->setup_policy(false); // not always_clear
       reference_processor()->set_active_mt_degree(active_workers);
@@ -440,10 +442,10 @@
         PSRefProcTaskExecutor task_executor;
         stats = reference_processor()->process_discovered_references(
           &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
-          &_gc_timer, _gc_tracer.gc_id());
+          &_gc_timer);
       } else {
         stats = reference_processor()->process_discovered_references(
-          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
+          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
       }
 
       _gc_tracer.report_gc_reference_stats(stats);
@@ -458,7 +460,7 @@
     }
 
     {
-      GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
+      GCTraceTime tm("StringTable", false, false, &_gc_timer);
       // Unlink any dead interned Strings and process the remaining live ones.
       PSScavengeRootsClosure root_closure(promotion_manager);
       StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
@@ -628,7 +630,7 @@
     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
 
     {
-      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());
+      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
 
       CodeCache::prune_scavenge_root_nmethods();
     }
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -583,7 +583,7 @@
 
   init_assuming_no_promotion_failure();
 
-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
 
@@ -646,7 +646,7 @@
   rp->setup_policy(clear_all_soft_refs);
   const ReferenceProcessorStats& stats =
   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
-                                    NULL, _gc_timer, gc_tracer.gc_id());
+                                    NULL, _gc_timer);
   gc_tracer.report_gc_reference_stats(stats);
 
   if (!_promotion_failed) {
--- a/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -70,7 +70,7 @@
   set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
-  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
+  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
 
   gch->trace_heap_before_gc(_gc_tracer);
 
@@ -186,7 +186,7 @@
 
 void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
@@ -217,7 +217,7 @@
     ref_processor()->setup_policy(clear_all_softrefs);
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
-        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id());
+        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
     gc_tracer()->report_gc_reference_stats(stats);
   }
 
@@ -259,7 +259,7 @@
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);
 
   gch->prepare_for_compaction();
 }
@@ -275,7 +275,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);
 
   // Need new claim bits for the pointer adjustment tracing.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -327,7 +327,7 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
 
   GenCompactClosure blk;
   gch->generation_iterate(&blk, true);
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -573,13 +573,13 @@
 
 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
   if (HeapDumpBeforeFullGC) {
-    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
+    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
     // We are doing a full collection and a heap dump before
     // full collection has been requested.
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramBeforeFullGC) {
-    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create());
+    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
     inspector.doit();
   }
@@ -587,11 +587,11 @@
 
 void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
   if (HeapDumpAfterFullGC) {
-    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create());
+    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramAfterFullGC) {
-    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create());
+    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
     inspector.doit();
   }
--- a/hotspot/src/share/vm/gc/shared/gcId.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/gcId.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -25,18 +25,37 @@
 #include "precompiled.hpp"
 #include "gc/shared/gcId.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
 
 uint GCId::_next_id = 0;
 
-const GCId GCId::create() {
-  return GCId(_next_id++);
+NamedThread* currentNamedthread() {
+  assert(Thread::current()->is_Named_thread(), "This thread must be NamedThread");
+  return (NamedThread*)Thread::current();
 }
-const GCId GCId::peek() {
-  return GCId(_next_id);
+
+const uint GCId::create() {
+  return _next_id++;
+}
+
+const uint GCId::current() {
+  assert(currentNamedthread()->gc_id() != undefined(), "Using undefined GC id.");
+  return current_raw();
 }
-const GCId GCId::undefined() {
-  return GCId(UNDEFINED);
+
+const uint GCId::current_raw() {
+  return currentNamedthread()->gc_id();
+}
+
+GCIdMark::GCIdMark() : _gc_id(GCId::create()) {
+  currentNamedthread()->set_gc_id(_gc_id);
 }
-bool GCId::is_undefined() const {
-  return _id == UNDEFINED;
+
+GCIdMark::GCIdMark(uint gc_id) : _gc_id(gc_id) {
+  currentNamedthread()->set_gc_id(_gc_id);
 }
+
+GCIdMark::~GCIdMark() {
+  currentNamedthread()->set_gc_id(GCId::undefined());
+}
+
--- a/hotspot/src/share/vm/gc/shared/gcId.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/gcId.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -27,25 +27,26 @@
 
 #include "memory/allocation.hpp"
 
-class GCId VALUE_OBJ_CLASS_SPEC {
- private:
-  uint _id;
-  GCId(uint id) : _id(id) {}
-  GCId() { } // Unused
-
+class GCId : public AllStatic {
+  friend class GCIdMark;
   static uint _next_id;
   static const uint UNDEFINED = (uint)-1;
+  static const uint create();
 
  public:
-  uint id() const {
-    assert(_id != UNDEFINED, "Using undefined GC ID");
-    return _id;
-  }
-  bool is_undefined() const;
+  // Returns the currently active GC id. Asserts that there is an active GC id.
+  static const uint current();
+  // Same as current() but can return undefined() if no GC id is currently active
+  static const uint current_raw();
+  static const uint undefined() { return UNDEFINED; }
+};
 
-  static const GCId create();
-  static const GCId peek();
-  static const GCId undefined();
+class GCIdMark : public StackObj {
+  uint _gc_id;
+ public:
+  GCIdMark();
+  GCIdMark(uint gc_id);
+  ~GCIdMark();
 };
 
 #endif // SHARE_VM_GC_SHARED_GCID_HPP
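
Because the foreground pauses of a concurrent cycle run on the VM thread (and parallel task bodies on gang workers), the id has to be handed across threads explicitly. A minimal sketch of that hand-off, mirroring the VM_CMS_Operation and VM_CGC_Operation hunks above; the class below is hypothetical and omits the VM_Operation boilerplate:

    #include "gc/shared/gcId.hpp"

    class HypotheticalConcurrentPhaseOp {
      uint _gc_id;
     public:
      // Constructed on the concurrent GC thread, inside the cycle's GCIdMark scope.
      HypotheticalConcurrentPhaseOp() : _gc_id(GCId::current()) { }

      // Later executed on the VM thread: re-establish the captured id there.
      void doit() {
        GCIdMark gc_id_mark(_gc_id);  // bind the captured id to the VM thread for this pause
        // ... GCTraceTime / gclog_stamp() in the pause now report the same GC id ...
      }
    };

The yieldingWorkgroup.cpp hunk above applies the same pattern to gang workers, re-marking each worker with the id stored on the task (GCIdMark gc_id_mark(data.task()->gc_id())) before calling work().
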
--- a/hotspot/src/share/vm/gc/shared/gcTrace.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/gcTrace.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -40,31 +40,16 @@
 #include "gc/g1/evacuationInfo.hpp"
 #endif
 
-#define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?")
-#define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?")
-
 void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
-  assert_unset_gc_id();
-
-  GCId gc_id = GCId::create();
-  _shared_gc_info.set_gc_id(gc_id);
   _shared_gc_info.set_cause(cause);
   _shared_gc_info.set_start_timestamp(timestamp);
 }
 
 void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {
-  assert_unset_gc_id();
-
   report_gc_start_impl(cause, timestamp);
 }
 
-bool GCTracer::has_reported_gc_start() const {
-  return !_shared_gc_info.gc_id().is_undefined();
-}
-
 void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
   _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
   _shared_gc_info.set_end_timestamp(timestamp);
@@ -74,16 +59,10 @@
 }
 
 void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   report_gc_end_impl(timestamp, time_partitions);
-
-  _shared_gc_info.set_gc_id(GCId::undefined());
 }
 
 void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
-  assert_set_gc_id();
-
   send_reference_stats_event(REF_SOFT, rps.soft_count());
   send_reference_stats_event(REF_WEAK, rps.weak_count());
   send_reference_stats_event(REF_FINAL, rps.final_count());
@@ -92,14 +71,12 @@
 
 #if INCLUDE_SERVICES
 class ObjectCountEventSenderClosure : public KlassInfoClosure {
-  const GCId _gc_id;
   const double _size_threshold_percentage;
   const size_t _total_size_in_words;
   const Ticks _timestamp;
 
  public:
-  ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, const Ticks& timestamp) :
-    _gc_id(gc_id),
+  ObjectCountEventSenderClosure(size_t total_size_in_words, const Ticks& timestamp) :
     _size_threshold_percentage(ObjectCountCutOffPercent / 100),
     _total_size_in_words(total_size_in_words),
     _timestamp(timestamp)
@@ -107,7 +84,7 @@
 
   virtual void do_cinfo(KlassInfoEntry* entry) {
     if (should_send_event(entry)) {
-      ObjectCountEventSender::send(entry, _gc_id, _timestamp);
+      ObjectCountEventSender::send(entry, _timestamp);
     }
   }
 
@@ -119,7 +96,6 @@
 };
 
 void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
-  assert_set_gc_id();
   assert(is_alive_cl != NULL, "Must supply function to check liveness");
 
   if (ObjectCountEventSender::should_send_event()) {
@@ -129,7 +105,7 @@
     if (!cit.allocation_failed()) {
       HeapInspection hi(false, false, false, NULL);
       hi.populate_table(&cit, is_alive_cl);
-      ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now());
+      ObjectCountEventSenderClosure event_sender(cit.size_of_instances_in_words(), Ticks::now());
       cit.iterate(&event_sender);
     }
   }
@@ -137,14 +113,10 @@
 #endif // INCLUDE_SERVICES
 
 void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
-  assert_set_gc_id();
-
   send_gc_heap_summary_event(when, heap_summary);
 }
 
 void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {
-  assert_set_gc_id();
-
   send_meta_space_summary_event(when, summary);
 
   send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
@@ -154,7 +126,6 @@
 }
 
 void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
   assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");
 
   GCTracer::report_gc_end_impl(timestamp, time_partitions);
@@ -164,8 +135,6 @@
 }
 
 void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const {
-  assert_set_gc_id();
-
   send_promotion_failed_event(pf_info);
 }
 
@@ -189,78 +158,56 @@
 void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
                                                        uint age, bool tenured,
                                                        size_t plab_size) const {
-  assert_set_gc_id();
   send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
 }
 
 void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
                                                         uint age, bool tenured) const {
-  assert_set_gc_id();
   send_promotion_outside_plab_event(klass, obj_size, age, tenured);
 }
 
 void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   GCTracer::report_gc_end_impl(timestamp, time_partitions);
   send_old_gc_event();
 }
 
 void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
   send_parallel_old_event();
 }
 
 void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
-  assert_set_gc_id();
-
   _parallel_old_gc_info.report_dense_prefix(dense_prefix);
 }
 
 void OldGCTracer::report_concurrent_mode_failure() {
-  assert_set_gc_id();
-
   send_concurrent_mode_failure_event();
 }
 
 #if INCLUDE_ALL_GCS
-void G1MMUTracer::report_mmu(const GCId& gcId, double timeSlice, double gcTime, double maxTime) {
-  assert(!gcId.is_undefined(), "Undefined GC id");
-
-  send_g1_mmu_event(gcId, timeSlice, gcTime, maxTime);
+void G1MMUTracer::report_mmu(double timeSlice, double gcTime, double maxTime) {
+  send_g1_mmu_event(timeSlice, gcTime, maxTime);
 }
 
 void G1NewTracer::report_yc_type(G1YCType type) {
-  assert_set_gc_id();
-
   _g1_young_gc_info.set_type(type);
 }
 
 void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
   send_g1_young_gc_event();
 }
 
 void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {
-  assert_set_gc_id();
-
   send_evacuation_info_event(info);
 }
 
 void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {
-  assert_set_gc_id();
-
   send_evacuation_failed_event(ef_info);
   ef_info.reset();
 }
 
 void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summary, const G1EvacSummary& old_summary) const {
-  assert_set_gc_id();
-
   send_young_evacuation_statistics(young_summary);
   send_old_evacuation_statistics(old_summary);
 }
--- a/hotspot/src/share/vm/gc/shared/gcTrace.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/gcTrace.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -52,7 +52,6 @@
 
 class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
  private:
-  GCId _gc_id;
   GCName _name;
   GCCause::Cause _cause;
   Ticks     _start_timestamp;
@@ -62,7 +61,6 @@
 
  public:
   SharedGCInfo(GCName name) :
-    _gc_id(GCId::undefined()),
     _name(name),
     _cause(GCCause::_last_gc_cause),
     _start_timestamp(),
@@ -71,9 +69,6 @@
     _longest_pause() {
   }
 
-  void set_gc_id(GCId gc_id) { _gc_id = gc_id; }
-  const GCId& gc_id() const { return _gc_id; }
-
   void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; }
   const Ticks start_timestamp() const { return _start_timestamp; }
 
@@ -128,8 +123,6 @@
   void report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& metaspace_summary) const;
   void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
   void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
-  bool has_reported_gc_start() const;
-  const GCId& gc_id() { return _shared_gc_info.gc_id(); }
 
  protected:
   GCTracer(GCName name) : _shared_gc_info(name) {}
@@ -242,10 +235,10 @@
 
 #if INCLUDE_ALL_GCS
 class G1MMUTracer : public AllStatic {
-  static void send_g1_mmu_event(const GCId& gcId, double timeSlice, double gcTime, double maxTime);
+  static void send_g1_mmu_event(double timeSlice, double gcTime, double maxTime);
 
  public:
-  static void report_mmu(const GCId& gcId, double timeSlice, double gcTime, double maxTime);
+  static void report_mmu(double timeSlice, double gcTime, double maxTime);
 };
 
 class G1NewTracer : public YoungGCTracer {
--- a/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -44,7 +44,7 @@
 void GCTracer::send_garbage_collection_event() const {
   EventGCGarbageCollection event(UNTIMED);
   if (event.should_commit()) {
-    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_gcId(GCId::current());
     event.set_name(_shared_gc_info.name());
     event.set_cause((u2) _shared_gc_info.cause());
     event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
@@ -58,7 +58,7 @@
 void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
   EventGCReferenceStatistics e;
   if (e.should_commit()) {
-      e.set_gcId(_shared_gc_info.gc_id().id());
+      e.set_gcId(GCId::current());
       e.set_type((u1)type);
       e.set_count(count);
       e.commit();
@@ -69,7 +69,7 @@
                                                       const MetaspaceChunkFreeListSummary& summary) const {
   EventMetaspaceChunkFreeListSummary e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_when(when);
     e.set_metadataType(mdtype);
 
@@ -92,7 +92,7 @@
 void ParallelOldTracer::send_parallel_old_event() const {
   EventGCParallelOld e(UNTIMED);
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
     e.set_starttime(_shared_gc_info.start_timestamp());
     e.set_endtime(_shared_gc_info.end_timestamp());
@@ -103,7 +103,7 @@
 void YoungGCTracer::send_young_gc_event() const {
   EventGCYoungGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_tenuringThreshold(_tenuring_threshold);
     e.set_starttime(_shared_gc_info.start_timestamp());
     e.set_endtime(_shared_gc_info.end_timestamp());
@@ -125,7 +125,7 @@
 
   EventPromoteObjectInNewPLAB event;
   if (event.should_commit()) {
-    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_gcId(GCId::current());
     event.set_class(klass);
     event.set_objectSize(obj_size);
     event.set_tenured(tenured);
@@ -140,7 +140,7 @@
 
   EventPromoteObjectOutsidePLAB event;
   if (event.should_commit()) {
-    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_gcId(GCId::current());
     event.set_class(klass);
     event.set_objectSize(obj_size);
     event.set_tenured(tenured);
@@ -152,7 +152,7 @@
 void OldGCTracer::send_old_gc_event() const {
   EventGCOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_starttime(_shared_gc_info.start_timestamp());
     e.set_endtime(_shared_gc_info.end_timestamp());
     e.commit();
@@ -171,7 +171,7 @@
 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
   EventPromotionFailed e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_data(to_trace_struct(pf_info));
     e.set_thread(pf_info.thread()->thread_id());
     e.commit();
@@ -182,7 +182,7 @@
 void OldGCTracer::send_concurrent_mode_failure_event() {
   EventConcurrentModeFailure e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.commit();
   }
 }
@@ -191,7 +191,7 @@
 void G1NewTracer::send_g1_young_gc_event() {
   EventGCG1GarbageCollection e(UNTIMED);
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_type(_g1_young_gc_info.type());
     e.set_starttime(_shared_gc_info.start_timestamp());
     e.set_endtime(_shared_gc_info.end_timestamp());
@@ -199,10 +199,10 @@
   }
 }
 
-void G1MMUTracer::send_g1_mmu_event(const GCId& gcId, double timeSlice, double gcTime, double maxTime) {
+void G1MMUTracer::send_g1_mmu_event(double timeSlice, double gcTime, double maxTime) {
   EventGCG1MMU e;
   if (e.should_commit()) {
-    e.set_gcId(gcId.id());
+    e.set_gcId(GCId::current());
     e.set_timeSlice(timeSlice);
     e.set_gcTime(gcTime);
     e.set_maxGcTime(maxTime);
@@ -213,7 +213,7 @@
 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
   EventEvacuationInfo e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_cSetRegions(info->collectionset_regions());
     e.set_cSetUsedBefore(info->collectionset_used_before());
     e.set_cSetUsedAfter(info->collectionset_used_after());
@@ -229,7 +229,7 @@
 void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
   EventEvacuationFailed e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_data(to_trace_struct(ef_info));
     e.commit();
   }
@@ -253,7 +253,7 @@
 void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
   EventGCG1EvacuationYoungStatistics surv_evt;
   if (surv_evt.should_commit()) {
-    surv_evt.set_stats(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
+    surv_evt.set_stats(create_g1_evacstats(GCId::current(), summary));
     surv_evt.commit();
   }
 }
@@ -261,7 +261,7 @@
 void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
   EventGCG1EvacuationOldStatistics old_evt;
   if (old_evt.should_commit()) {
-    old_evt.set_stats(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
+    old_evt.set_stats(create_g1_evacstats(GCId::current(), summary));
     old_evt.commit();
   }
 }
@@ -287,17 +287,16 @@
 }
 
 class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
-  GCId _gc_id;
   GCWhen::Type _when;
  public:
-  GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {}
+  GCHeapSummaryEventSender(GCWhen::Type when) : _when(when) {}
 
   void visit(const GCHeapSummary* heap_summary) const {
     const VirtualSpaceSummary& heap_space = heap_summary->heap();
 
     EventGCHeapSummary e;
     if (e.should_commit()) {
-      e.set_gcId(_gc_id.id());
+      e.set_gcId(GCId::current());
       e.set_when((u1)_when);
       e.set_heapSpace(to_trace_struct(heap_space));
       e.set_heapUsed(heap_summary->used());
@@ -310,7 +309,7 @@
 
     EventG1HeapSummary e;
     if (e.should_commit()) {
-      e.set_gcId(_gc_id.id());
+      e.set_gcId(GCId::current());
       e.set_when((u1)_when);
       e.set_edenUsedSize(g1_heap_summary->edenUsed());
       e.set_edenTotalSize(g1_heap_summary->edenCapacity());
@@ -331,7 +330,7 @@
 
     EventPSHeapSummary e;
     if (e.should_commit()) {
-      e.set_gcId(_gc_id.id());
+      e.set_gcId(GCId::current());
       e.set_when((u1)_when);
 
       e.set_oldSpace(to_trace_struct(ps_heap_summary->old()));
@@ -346,7 +345,7 @@
 };
 
 void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
-  GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when);
+  GCHeapSummaryEventSender visitor(when);
   heap_summary.accept(&visitor);
 }
 
@@ -363,7 +362,7 @@
 void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
   EventMetaspaceSummary e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.gc_id().id());
+    e.set_gcId(GCId::current());
     e.set_when((u1) when);
     e.set_gcThreshold(meta_space_summary.capacity_until_GC());
     e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
@@ -374,15 +373,12 @@
 }
 
 class PhaseSender : public PhaseVisitor {
-  GCId _gc_id;
  public:
-  PhaseSender(GCId gc_id) : _gc_id(gc_id) {}
-
   template<typename T>
   void send_phase(PausePhase* pause) {
     T event(UNTIMED);
     if (event.should_commit()) {
-      event.set_gcId(_gc_id.id());
+      event.set_gcId(GCId::current());
       event.set_name(pause->name());
       event.set_starttime(pause->start());
       event.set_endtime(pause->end());
@@ -406,7 +402,7 @@
 };
 
 void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
-  PhaseSender phase_reporter(_shared_gc_info.gc_id());
+  PhaseSender phase_reporter;
 
   TimePartitionPhasesIterator iter(time_partitions);
   while (iter.has_next()) {
--- a/hotspot/src/share/vm/gc/shared/gcTraceTime.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/gcTraceTime.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -35,7 +35,7 @@
 #include "utilities/ticks.inline.hpp"
 
 
-GCTraceTimeImpl::GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) :
+GCTraceTimeImpl::GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer) :
     _title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() {
   if (_doit || _timer != NULL) {
     _start_counter.stamp();
@@ -49,11 +49,7 @@
   }
 
   if (_doit) {
-    gclog_or_tty->date_stamp(PrintGCDateStamps);
-    gclog_or_tty->stamp(PrintGCTimeStamps);
-    if (PrintGCID) {
-      gclog_or_tty->print("#%u: ", gc_id.id());
-    }
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->print("[%s", title);
     gclog_or_tty->flush();
   }
--- a/hotspot/src/share/vm/gc/shared/gcTraceTime.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/gcTraceTime.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -40,7 +40,7 @@
   Ticks _start_counter;
 
  public:
-  GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id);
+  GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer);
   ~GCTraceTimeImpl();
 };
 
@@ -48,8 +48,8 @@
   GCTraceTimeImpl _gc_trace_time_impl;
 
  public:
-  GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) :
-    _gc_trace_time_impl(title, doit, print_cr, timer, gc_id) {};
+  GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
+    _gc_trace_time_impl(title, doit, print_cr, timer) {};
 };
 
 #endif // SHARE_VM_GC_SHARED_GCTRACETIME_HPP
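
With the id dropped from the constructor, a timed and logged phase only needs an enclosing GCIdMark; a minimal sketch of the trimmed-down usage (hypothetical phase name and timer, not from this changeset):

void example_timed_phase(GCTimer* timer) {
  GCIdMark gc_id_mark;                  // establishes the id for this thread, see gcId.hpp above
  GCTraceTime t("Example Phase", PrintGCDetails, true /* print_cr */, timer);
  // ... phase work; the "#<id>: " log prefix now comes from gclog_stamp() via GCId::current() ...
}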
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -30,6 +30,7 @@
 #include "code/icBuffer.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.hpp"
@@ -315,9 +316,7 @@
                                           bool restore_marks_for_biased_locking) {
   // Timer for individual generations. Last argument is false: no CR
   // FIXME: We should try to start the timing earlier to cover more of the GC pause
-  // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
-  // so we can assume here that the next GC id is what we want.
-  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
+  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL);
   TraceCollectorStats tcs(gen->counters());
   TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
 
@@ -434,6 +433,8 @@
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
 
+  GCIdMark gc_id_mark;
+
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                           collector_policy()->should_clear_all_soft_refs();
 
@@ -449,9 +450,7 @@
     bool complete = full && (max_generation == OldGen);
     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
-    // so we can assume here that the next GC id is what we want.
-    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
+    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
 
     gc_prologue(complete);
     increment_total_collections(complete);
@@ -489,6 +488,7 @@
     bool must_restore_marks_for_biased_locking = false;
 
     if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
+      GCIdMark gc_id_mark;
       if (!complete) {
         // The full_collections increment was missed above.
         increment_total_full_collections();
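
One consequence of the nesting above, worth a sketch (simplified, hypothetical control flow, not the actual GenCollectedHeap code): the outer GCIdMark tags the overall collection, and the inner GCIdMark gives the old-generation collection a fresh id of its own.

void example_do_collection(bool also_collect_old_gen) {
  GCIdMark outer_mark;                  // id N covers the young/overall collection
  // ... collect the young generation under id N ...
  if (also_collect_old_gen) {
    GCIdMark inner_mark;                // id N+1 for the full collection
    // ... collect the old generation under id N+1 ...
  }                                     // note: ~GCIdMark() resets to undefined(), not back to N
}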
--- a/hotspot/src/share/vm/gc/shared/objectCountEventSender.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/objectCountEventSender.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -33,13 +33,13 @@
 #include "utilities/ticks.hpp"
 #if INCLUDE_SERVICES
 
-void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) {
+void ObjectCountEventSender::send(const KlassInfoEntry* entry, const Ticks& timestamp) {
 #if INCLUDE_TRACE
   assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
          "Only call this method if the event is enabled");
 
   EventObjectCountAfterGC event(UNTIMED);
-  event.set_gcId(gc_id.id());
+  event.set_gcId(GCId::current());
   event.set_class(entry->klass());
   event.set_count(entry->count());
   event.set_totalSize(entry->words() * BytesPerWord);
--- a/hotspot/src/share/vm/gc/shared/objectCountEventSender.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/objectCountEventSender.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -36,7 +36,7 @@
 
 class ObjectCountEventSender : public AllStatic {
  public:
-  static void send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp);
+  static void send(const KlassInfoEntry* entry, const Ticks& timestamp);
   static bool should_send_event();
 };
 
--- a/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -192,8 +192,8 @@
 class GCRefTraceTime : public StackObj {
   GCTraceTimeImpl _gc_trace_time;
  public:
-  GCRefTraceTime(const char* title, bool doit, GCTimer* timer, GCId gc_id, size_t count) :
-    _gc_trace_time(title, doit, false, timer, gc_id) {
+  GCRefTraceTime(const char* title, bool doit, GCTimer* timer, size_t count) :
+    _gc_trace_time(title, doit, false, timer) {
     log_ref_count(count, doit);
   }
 };
@@ -203,8 +203,7 @@
   OopClosure*                  keep_alive,
   VoidClosure*                 complete_gc,
   AbstractRefProcTaskExecutor* task_executor,
-  GCTimer*                     gc_timer,
-  GCId                         gc_id) {
+  GCTimer*                     gc_timer) {
 
   assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
   // Stop treating discovered references specially.
@@ -233,7 +232,7 @@
 
   // Soft references
   {
-    GCRefTraceTime tt("SoftReference", trace_time, gc_timer, gc_id, stats.soft_count());
+    GCRefTraceTime tt("SoftReference", trace_time, gc_timer, stats.soft_count());
     process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
@@ -242,21 +241,21 @@
 
   // Weak references
   {
-    GCRefTraceTime tt("WeakReference", trace_time, gc_timer, gc_id, stats.weak_count());
+    GCRefTraceTime tt("WeakReference", trace_time, gc_timer, stats.weak_count());
     process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
 
   // Final references
   {
-    GCRefTraceTime tt("FinalReference", trace_time, gc_timer, gc_id, stats.final_count());
+    GCRefTraceTime tt("FinalReference", trace_time, gc_timer, stats.final_count());
     process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
 
   // Phantom references
   {
-    GCRefTraceTime tt("PhantomReference", trace_time, gc_timer, gc_id, stats.phantom_count());
+    GCRefTraceTime tt("PhantomReference", trace_time, gc_timer, stats.phantom_count());
     process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                is_alive, keep_alive, complete_gc, task_executor);
 
@@ -273,7 +272,7 @@
   // thus use JNI weak references to circumvent the phantom references and
   // resurrect a "post-mortem" object.
   {
-    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id);
+    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
     NOT_PRODUCT(log_ref_count(count_jni_refs(), trace_time);)
     if (task_executor != NULL) {
       task_executor->set_single_threaded_mode();
@@ -1152,13 +1151,12 @@
   OopClosure* keep_alive,
   VoidClosure* complete_gc,
   YieldClosure* yield,
-  GCTimer* gc_timer,
-  GCId     gc_id) {
+  GCTimer* gc_timer) {
 
   // Soft references
   {
     GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer, gc_id);
+              false, gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1171,7 +1169,7 @@
   // Weak references
   {
     GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer, gc_id);
+              false, gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1184,7 +1182,7 @@
   // Final references
   {
     GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer, gc_id);
+              false, gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1197,7 +1195,7 @@
   // Phantom references
   {
     GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer, gc_id);
+              false, gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
--- a/hotspot/src/share/vm/gc/shared/referenceProcessor.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -331,8 +331,7 @@
                                       OopClosure*        keep_alive,
                                       VoidClosure*       complete_gc,
                                       YieldClosure*      yield,
-                                      GCTimer*           gc_timer,
-                                      GCId               gc_id);
+                                      GCTimer*           gc_timer);
 
   // Returns the name of the discovered reference list
   // occupying the i / _num_q slot.
@@ -441,8 +440,7 @@
                                 OopClosure*                  keep_alive,
                                 VoidClosure*                 complete_gc,
                                 AbstractRefProcTaskExecutor* task_executor,
-                                GCTimer *gc_timer,
-                                GCId    gc_id);
+                                GCTimer *gc_timer);
 
   // Enqueue references at end of GC (called by the garbage collector)
   bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
--- a/hotspot/src/share/vm/gc/shared/workgroup.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/workgroup.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
@@ -328,6 +329,7 @@
 void GangWorker::run_task(WorkData data) {
   print_task_started(data);
 
+  GCIdMark gc_id_mark(data._task->gc_id());
   data._task->work(data._worker_id);
 
   print_task_done(data);
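
Taken together with the AbstractGangTask change in the next file, the pattern is: the task snapshots the issuing thread's id at construction (GCId::current_raw()), and each GangWorker re-installs it around work() via GCIdMark, so logging and events on worker threads carry the same GC id. A hedged sketch of a task written against this API (hypothetical subclass, not from this changeset):

class ExampleGangTask : public AbstractGangTask {
 public:
  ExampleGangTask() : AbstractGangTask("Example Task") {}  // base constructor records GCId::current_raw()
  virtual void work(uint worker_id) {
    // Runs on a GangWorker; run_task() has already installed this task's gc_id via GCIdMark,
    // so e.g. event.set_gcId(GCId::current()) reports the id of the GC that created the task.
  }
};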
--- a/hotspot/src/share/vm/gc/shared/workgroup.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/workgroup.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/thread.hpp"
+#include "gc/shared/gcId.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
@@ -54,9 +55,13 @@
 // You subclass this to supply your own work() method
 class AbstractGangTask VALUE_OBJ_CLASS_SPEC {
   const char* _name;
+  const uint _gc_id;
 
  public:
-  AbstractGangTask(const char* name) : _name(name) {}
+  AbstractGangTask(const char* name) :
+    _name(name),
+    _gc_id(GCId::current_raw())
+ {}
 
   // The abstract work method.
   // The argument tells you which member of the gang you are.
@@ -64,6 +69,7 @@
 
   // Debugging accessor for the name.
   const char* name() const { return _name; }
+  const uint gc_id() const { return _gc_id; }
 };
 
 struct WorkData {
--- a/hotspot/src/share/vm/runtime/thread.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -31,6 +31,7 @@
 #include "code/codeCacheExtensions.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/compileBroker.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "interpreter/interpreter.hpp"
@@ -1149,6 +1150,7 @@
 NamedThread::NamedThread() : Thread() {
   _name = NULL;
   _processed_thread = NULL;
+  _gc_id = GCId::undefined();
 }
 
 NamedThread::~NamedThread() {
--- a/hotspot/src/share/vm/runtime/thread.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -678,6 +678,7 @@
   char* _name;
   // log JavaThread being processed by oops_do
   JavaThread* _processed_thread;
+  uint _gc_id; // The current GC id when a thread takes part in GC
 
  public:
   NamedThread();
@@ -690,6 +691,9 @@
   JavaThread *processed_thread() { return _processed_thread; }
   void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
   virtual void print_on(outputStream* st) const;
+
+  void set_gc_id(uint gc_id) { _gc_id = gc_id; }
+  uint gc_id() { return _gc_id; }
 };
 
 // Worker threads are named and have an id of an assigned work.
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Wed Sep 30 09:07:21 2015 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "compiler/compileLog.hpp"
 #include "gc/shared/gcId.hpp"
+#include "gc/shared/gcId.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/os.hpp"
@@ -238,11 +239,11 @@
   return;
 }
 
-void outputStream::gclog_stamp(const GCId& gc_id) {
+void outputStream::gclog_stamp() {
   date_stamp(PrintGCDateStamps);
   stamp(PrintGCTimeStamps);
   if (PrintGCID) {
-    print("#%u: ", gc_id.id());
+    print("#%u: ", GCId::current());
   }
 }
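
For reference, a hedged sketch of what the new stamp produces at a hypothetical call site (assumes a NamedThread inside a GCIdMark scope; the output line is illustrative, not captured from a run):

gclog_or_tty->gclog_stamp();
gclog_or_tty->print_cr("[Example]");
// With -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintGCID this prints something like:
//   2015-09-30T09:07:21.123+0200: 1.234: #5: [Example]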
 
--- a/hotspot/src/share/vm/utilities/ostream.hpp	Tue Sep 29 17:44:58 2015 +0200
+++ b/hotspot/src/share/vm/utilities/ostream.hpp	Wed Sep 30 09:07:21 2015 +0200
@@ -108,7 +108,7 @@
    void date_stamp(bool guard) {
      date_stamp(guard, "", ": ");
    }
-   void gclog_stamp(const GCId& gc_id);
+   void gclog_stamp();
 
    // portable printing of 64 bit integers
    void print_jlong(jlong value);