New metadata system for old objects built on top of a simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunk header contents.
author mgronlun
Sat, 24 Aug 2019 14:30:27 +0200
branch JEP-349-branch
changeset 57870 00860d9caf4d
parent 57862 84ef29ccac56
child 57871 7d2478b04e95
New metadata system for old objects built on top of a simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunk header contents.
src/hotspot/share/jfr/dcmd/jfrDcmds.cpp
src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp
src/hotspot/share/jfr/jfr.cpp
src/hotspot/share/jfr/jni/jfrJavaCall.cpp
src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp
src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp
src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp
src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp
src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp
src/hotspot/share/jfr/leakprofiler/chains/edge.hpp
src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp
src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp
src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp
src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.hpp
src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp
src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp
src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp
src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.hpp
src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp
src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.hpp
src/hotspot/share/jfr/leakprofiler/emitEventOperation.cpp
src/hotspot/share/jfr/leakprofiler/emitEventOperation.hpp
src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp
src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp
src/hotspot/share/jfr/leakprofiler/startOperation.hpp
src/hotspot/share/jfr/leakprofiler/stopOperation.hpp
src/hotspot/share/jfr/leakprofiler/utilities/vmOperation.hpp
src/hotspot/share/jfr/metadata/metadata.xml
src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp
src/hotspot/share/jfr/periodic/jfrOSInterface.cpp
src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp
src/hotspot/share/jfr/recorder/jfrRecorder.cpp
src/hotspot/share/jfr/recorder/jfrRecorder.hpp
src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp
src/hotspot/share/jfr/recorder/repository/jfrChunk.hpp
src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp
src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp
src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp
src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp
src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp
src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.hpp
src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp
src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp
src/hotspot/share/jfr/support/jfrFlush.cpp
src/hotspot/share/jfr/support/jfrFlush.hpp
src/hotspot/share/jfr/support/jfrThreadLocal.cpp
src/hotspot/share/jfr/support/jfrThreadLocal.hpp
src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp
src/hotspot/share/jfr/utilities/jfrHashtable.hpp
src/hotspot/share/jfr/utilities/jfrIterator.hpp
src/hotspot/share/jfr/utilities/jfrTypes.hpp
src/hotspot/share/jfr/writers/jfrJavaEventWriter.cpp
src/hotspot/share/jfr/writers/jfrTypeWriterHost.hpp
src/hotspot/share/jfr/writers/jfrWriterHost.inline.hpp
src/hotspot/share/runtime/vmOperations.hpp
src/jdk.jfr/share/classes/jdk/jfr/consumer/ChunkParser.java
--- a/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -444,7 +444,13 @@
 
   jobjectArray settings = NULL;
   if (_settings.is_set()) {
-    const int length = _settings.value()->array()->length();
+    int length = _settings.value()->array()->length();
+    if (length == 1) {
+      const char* c_str = _settings.value()->array()->at(0);
+      if (strcmp(c_str, "none") == 0) {
+        length = 0;
+      }
+    }
     settings = JfrJavaSupport::new_string_array(length, CHECK);
     assert(settings != NULL, "invariant");
     for (int i = 0; i < length; ++i) {
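The jfrDcmds.cpp change above makes a lone "none" value for the settings option collapse to a zero-length settings array, so a recording can be started with no preconfigured event settings. A minimal standalone sketch of the sentinel check, with a hypothetical helper name:

    // Sketch: treat a single "none" entry as "no settings at all".
    #include <cstring>

    static int effective_settings_length(const char* const* entries, int length) {
      if (length == 1 && std::strcmp(entries[0], "none") == 0) {
        return 0;  // sentinel: the caller builds an empty settings array
      }
      return length;
    }
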
--- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1521,7 +1521,7 @@
     assert(new_method != NULL, "invariant");
     assert(new_method->name() == old_method->name(), "invariant");
     assert(new_method->signature() == old_method->signature(), "invariant");
-    *new_method->trace_flags_addr() = old_method->trace_flags();
+    new_method->set_trace_flags(old_method->trace_flags());
     assert(new_method->trace_flags() == old_method->trace_flags(), "invariant");
   }
 }
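The transformer now copies trace flags through the set_trace_flags() setter rather than writing through the raw address from trace_flags_addr(), so all mutation of the flags goes through one accessor. The shape of the pattern, as a sketch with simplified types:

    // Sketch: a single mutation point instead of raw pointer writes.
    #include <cstdint>

    class MethodFlags {
      uint16_t _trace_flags;
     public:
      uint16_t trace_flags() const { return _trace_flags; }
      void set_trace_flags(uint16_t flags) { _trace_flags = flags; }
    };
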
--- a/src/hotspot/share/jfr/jfr.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/jfr.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -96,7 +96,9 @@
 }
 
 void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
-  LeakProfiler::oops_do(is_alive, f);
+  if (LeakProfiler::is_running()) {
+    LeakProfiler::oops_do(is_alive, f);
+  }
 }
 
 bool Jfr::on_flight_recorder_option(const JavaVMOption** option, char* delimiter) {
--- a/src/hotspot/share/jfr/jni/jfrJavaCall.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/jni/jfrJavaCall.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -184,7 +184,7 @@
   }
 }
 
-JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(0) {
+JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(-1) {
   assert(result != NULL, "invariant");
 }
 
@@ -193,7 +193,7 @@
   _klass(NULL),
   _name(NULL),
   _signature(NULL),
-  _array_length(0) {
+  _array_length(-1) {
   assert(result != NULL, "invariant");
   if (klass_name != NULL) {
     set_klass(klass_name, CHECK);
@@ -210,7 +210,7 @@
   _klass(NULL),
   _name(NULL),
   _signature(NULL),
-  _array_length(0) {
+  _array_length(-1) {
   assert(result != NULL, "invariant");
   if (klass != NULL) {
     set_klass(klass);
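Initializing _array_length to -1 instead of 0 lets a genuinely zero-length array argument be distinguished from "no array at all". A sketch of the sentinel convention (the field name mirrors the diff; the rest is hypothetical):

    // Sketch: -1 means "not an array"; 0 is now a valid array length.
    struct Args {
      int _array_length;
      Args() : _array_length(-1) {}
      bool is_array() const { return _array_length >= 0; }
      void set_array_length(int length) { _array_length = length; }
    };
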
--- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,7 +99,6 @@
 }
 
 void BFSClosure::process() {
-
   process_root_set();
   process_queue();
 }
@@ -138,7 +137,6 @@
 
     // if we are processinig initial root set, don't add to queue
     if (_current_parent != NULL) {
-      assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant");
       _edge_queue->add(_current_parent, reference);
     }
 
@@ -151,20 +149,8 @@
 void BFSClosure::add_chain(const oop* reference, const oop pointee) {
   assert(pointee != NULL, "invariant");
   assert(NULL == pointee->mark(), "invariant");
-
-  const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2;
-  ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
-  size_t idx = 0;
-  chain[idx++] = Edge(NULL, reference);
-  // aggregate from breadth-first search
-  const Edge* current = _current_parent;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
-  }
-  assert(length == idx, "invariant");
-  _edge_store->add_chain(chain, length);
+  Edge leak_edge(_current_parent, reference);
+  _edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
 }
 
 void BFSClosure::dfs_fallback() {
@@ -241,3 +227,12 @@
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void BFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, HeapWordSize), "invariant");
+  assert(*ref != NULL, "invariant");
+  if (!_edge_queue->is_full()) {
+    _edge_queue->add(NULL, ref);
+  }
+}
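The new BFSClosure::do_root() is the hook invoked by the templated RootSetClosure (see rootSetClosure.cpp below): root references are enqueued with a NULL parent, which is how the breadth-first search later distinguishes root-set entries from interior edges. A self-contained sketch of that convention, with hypothetical queue types:

    // Sketch: roots enter the edge queue with no parent edge.
    #include <cstddef>
    #include <vector>

    struct QueuedEdge {
      const QueuedEdge* parent;  // nullptr identifies a root-set entry
      const void* reference;
    };

    struct EdgeQueue {
      std::vector<QueuedEdge> edges;
      size_t capacity = 1024;
      bool is_full() const { return edges.size() >= capacity; }
      void add(const QueuedEdge* parent, const void* ref) {
        if (!is_full()) edges.push_back(QueuedEdge{parent, ref});
      }
    };
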
--- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -26,7 +26,6 @@
 #define SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
 class BitSet;
 class Edge;
@@ -65,6 +64,7 @@
  public:
   BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
   void process();
+  void do_root(const oop* ref);
 
   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
--- a/src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -47,7 +47,7 @@
 
   BitMap::idx_t mark_obj(const HeapWord* addr) {
     const BitMap::idx_t bit = addr_to_bit(addr);
-    _bits.par_set_bit(bit);
+    _bits.set_bit(bit);
     return bit;
   }
 
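Replacing par_set_bit() with set_bit() drops an atomic compare-and-swap per marked object; this is safe because the traversal now runs single-threaded inside a safepoint operation (PathToGcRootsOperation, below). Conceptually, the two differ as in this sketch:

    // Sketch: parallel vs. plain bit marking.
    #include <atomic>
    #include <cstdint>

    // CAS loop: correct under concurrent markers, one atomic per bit.
    inline void par_set_bit(std::atomic<uintptr_t>& word, uintptr_t mask) {
      uintptr_t old = word.load(std::memory_order_relaxed);
      while (!(old & mask) && !word.compare_exchange_weak(old, old | mask)) {
        // retry until the bit is observed set or our CAS succeeds
      }
    }

    // Plain read-modify-write: sufficient when only one thread marks.
    inline void set_bit(uintptr_t& word, uintptr_t mask) {
      word |= mask;
    }
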
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,14 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
@@ -88,15 +88,15 @@
   // Mark root set, to avoid going sideways
   _max_depth = 1;
   _ignore_root_set = false;
-  DFSClosure dfs1;
-  RootSetClosure::process_roots(&dfs1);
+  DFSClosure dfs;
+  RootSetClosure<DFSClosure> rs(&dfs);
+  rs.process();
 
   // Depth-first search
   _max_depth = max_dfs_depth;
   _ignore_root_set = true;
   assert(_start_edge == NULL, "invariant");
-  DFSClosure dfs2;
-  RootSetClosure::process_roots(&dfs2);
+  rs.process();
 }
 
 void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
@@ -133,30 +133,29 @@
 }
 
 void DFSClosure::add_chain() {
-  const size_t length = _start_edge == NULL ? _depth + 1 :
-                        _start_edge->distance_to_root() + 1 + _depth + 1;
+  const size_t array_length = _depth + 2;
 
   ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
+  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, array_length);
   size_t idx = 0;
 
   // aggregate from depth-first search
   const DFSClosure* c = this;
   while (c != NULL) {
-    chain[idx++] = Edge(NULL, c->reference());
+    const size_t next = idx + 1;
+    chain[idx++] = Edge(&chain[next], c->reference());
     c = c->parent();
   }
-
-  assert(idx == _depth + 1, "invariant");
+  assert(_depth + 1 == idx, "invariant");
+  assert(array_length == idx + 1, "invariant");
 
   // aggregate from breadth-first search
-  const Edge* current = _start_edge;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
+  if (_start_edge != NULL) {
+    chain[idx++] = *_start_edge;
+  } else {
+    chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
   }
-  assert(idx == length, "invariant");
-  _edge_store->add_chain(chain, length);
+  _edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0));
 }
 
 void DFSClosure::do_oop(oop* ref) {
@@ -176,3 +175,11 @@
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void DFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, HeapWordSize), "invariant");
+  const oop pointee = *ref;
+  assert(pointee != NULL, "invariant");
+  closure_impl(ref, pointee);
+}
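add_chain() now builds a chain whose entries are linked through real parent pointers (Edge(&chain[next], ...)) rather than flat NULL-parent copies, because the new EdgeStore::put_chain() walks the chain via parent() instead of by array index. The linking step in isolation, with a simplified Edge:

    // Sketch: chain[i]'s parent is chain[i + 1]; the last entry is the root.
    #include <cstddef>

    struct Edge {
      const Edge* parent;
      const void* reference;
    };

    static void link_chain(Edge* chain, size_t length) {
      for (size_t i = 0; i + 1 < length; ++i) {
        chain[i].parent = &chain[i + 1];
      }
      chain[length - 1].parent = nullptr;  // chain terminates at the root
    }
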
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -26,7 +26,6 @@
 #define SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
 class BitSet;
 class Edge;
@@ -34,7 +33,7 @@
 class EdgeQueue;
 
 // Class responsible for iterating the heap depth-first
-class DFSClosure: public BasicOopIterateClosure {
+class DFSClosure : public BasicOopIterateClosure {
  private:
   static EdgeStore* _edge_store;
   static BitSet*    _mark_bits;
@@ -57,6 +56,7 @@
  public:
   static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
   static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
+  void do_root(const oop* ref);
 
   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
--- a/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -29,7 +29,7 @@
 #include "oops/oopsHierarchy.hpp"
 
 class Edge {
- private:
+ protected:
   const Edge* _parent;
   const oop* _reference;
  public:
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,37 +27,17 @@
 #include "jfr/leakprofiler/chains/edgeUtils.hpp"
 #include "oops/oop.inline.hpp"
 
-RoutableEdge::RoutableEdge() : Edge() {}
-RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference),
-                                                                       _skip_edge(NULL),
-                                                                       _skip_length(0),
-                                                                       _processed(false) {}
+StoredEdge::StoredEdge() : Edge() {}
+StoredEdge::StoredEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {}
 
-RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge),
-                                               _skip_edge(NULL),
-                                               _skip_length(0),
-                                               _processed(false) {}
-
-RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge),
-                                                      _skip_edge(edge._skip_edge),
-                                                      _skip_length(edge._skip_length),
-                                                      _processed(edge._processed) {}
+StoredEdge::StoredEdge(const Edge& edge) : Edge(edge), _gc_root_id(0), _skip_length(0) {}
 
-void RoutableEdge::operator=(const RoutableEdge& edge) {
-  Edge::operator=(edge);
-  _skip_edge = edge._skip_edge;
-  _skip_length = edge._skip_length;
-  _processed = edge._processed;
-}
+StoredEdge::StoredEdge(const StoredEdge& edge) : Edge(edge), _gc_root_id(edge._gc_root_id), _skip_length(edge._skip_length) {}
 
-size_t RoutableEdge::logical_distance_to_root() const {
-  size_t depth = 0;
-  const RoutableEdge* current = logical_parent();
-  while (current != NULL) {
-    depth++;
-    current = current->logical_parent();
-  }
-  return depth;
+void StoredEdge::operator=(const StoredEdge& edge) {
+  Edge::operator=(edge);
+  _gc_root_id = edge._gc_root_id;
+  _skip_length = edge._skip_length;
 }
 
 traceid EdgeStore::_edge_id_counter = 0;
@@ -69,79 +49,12 @@
 EdgeStore::~EdgeStore() {
   assert(_edges != NULL, "invariant");
   delete _edges;
-  _edges = NULL;
-}
-
-const Edge* EdgeStore::get_edge(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
-  return entry != NULL ? entry->literal_addr() : NULL;
-}
-
-const Edge* EdgeStore::put(const Edge* edge) {
-  assert(edge != NULL, "invariant");
-  const RoutableEdge e = *edge;
-  assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant");
-  EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference());
-  return entry.literal_addr();
-}
-
-traceid EdgeStore::get_id(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
-  assert(entry != NULL, "invariant");
-  return entry->id();
-}
-
-traceid EdgeStore::get_root_id(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  const Edge* root = EdgeUtils::root(*edge);
-  assert(root != NULL, "invariant");
-  return get_id(root);
-}
-
-void EdgeStore::add_chain(const Edge* chain, size_t length) {
-  assert(chain != NULL, "invariant");
-  assert(length > 0, "invariant");
-
-  size_t bottom_index = length - 1;
-  const size_t top_index = 0;
-
-  const Edge* stored_parent_edge = NULL;
-
-  // determine level of shared ancestry
-  for (; bottom_index > top_index; --bottom_index) {
-    const Edge* stored_edge = get_edge(&chain[bottom_index]);
-    if (stored_edge != NULL) {
-      stored_parent_edge = stored_edge;
-      continue;
-    }
-    break;
-  }
-
-  // insertion of new Edges
-  for (int i = (int)bottom_index; i >= (int)top_index; --i) {
-    Edge edge(stored_parent_edge, chain[i].reference());
-    stored_parent_edge = put(&edge);
-  }
-
-  const oop sample_object = stored_parent_edge->pointee();
-  assert(sample_object != NULL, "invariant");
-  assert(NULL == sample_object->mark(), "invariant");
-
-  // Install the "top" edge of the chain into the sample object mark oop.
-  // This associates the sample object with its navigable reference chain.
-  sample_object->set_mark(markOop(stored_parent_edge));
 }
 
 bool EdgeStore::is_empty() const {
   return !_edges->has_entries();
 }
 
-size_t EdgeStore::number_of_entries() const {
-  return _edges->cardinality();
-}
-
 void EdgeStore::assign_id(EdgeEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
@@ -153,3 +66,259 @@
   assert(entry->hash() == hash, "invariant");
   return true;
 }
+
+void EdgeStore::unlink(EdgeEntry* entry) {
+  assert(entry != NULL, "invariant");
+  // nothing
+}
+
+#ifdef ASSERT
+bool EdgeStore::contains(const oop* reference) const {
+  return get(reference) != NULL;
+}
+#endif
+
+StoredEdge* EdgeStore::get(const oop* reference) const {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
+  return entry != NULL ? entry->literal_addr() : NULL;
+}
+
+StoredEdge* EdgeStore::put(const oop* reference) {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
+  EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
+  return entry.literal_addr();
+}
+
+traceid EdgeStore::get_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  assert(entry != NULL, "invariant");
+  return entry->id();
+}
+
+traceid EdgeStore::gc_root_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  const traceid gc_root_id = static_cast<const StoredEdge*>(edge)->gc_root_id();
+  if (gc_root_id != 0) {
+    return gc_root_id;
+  }
+  // not cached
+  assert(edge != NULL, "invariant");
+  const Edge* const root = EdgeUtils::root(*edge);
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  return get_id(root);
+}
+
+static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_root, size_t* skip_length) {
+  assert(distance_to_root >= EdgeUtils::root_context, "invariant");
+  assert(*skip_length == 0, "invariant");
+  *skip_length = distance_to_root - (EdgeUtils::root_context - 1);
+  const Edge* const target = EdgeUtils::ancestor(**current, *skip_length);
+  assert(target != NULL, "invariant");
+  assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant");
+  return target;
+}
+
+bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert((*current)->distance_to_root() == distance_to_root, "invariant");
+
+  if (distance_to_root < EdgeUtils::root_context) {
+    // nothing to skip
+    return false;
+  }
+
+  size_t skip_length = 0;
+  const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length);
+  assert(skip_ancestor != NULL, "invariant");
+  (*previous)->set_skip_length(skip_length);
+
+  // lookup target
+  StoredEdge* stored_target = get(skip_ancestor->reference());
+  if (stored_target != NULL) {
+    (*previous)->set_parent(stored_target);
+    // linked to existing, complete
+    return true;
+  }
+
+  assert(stored_target == NULL, "invariant");
+  stored_target = put(skip_ancestor->reference());
+  assert(stored_target != NULL, "invariant");
+  (*previous)->set_parent(stored_target);
+  *previous = stored_target;
+  *current = skip_ancestor->parent();
+  return false;
+}
+
+static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) {
+  assert(current_stored != NULL, "invariant");
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  (*previous)->set_parent(current_stored);
+}
+
+static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) {
+  assert(edge != NULL, "invariant");
+  assert(distance != NULL, "invariant");
+  const StoredEdge* current = edge;
+  *distance = 1;
+  while (current != NULL && !current->is_skip_edge()) {
+    ++(*distance);
+    current = current->parent();
+  }
+  return current;
+}
+
+void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) {
+  assert(current_stored != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  size_t distance_to_skip_edge; // including the skip edge itself
+  const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge);
+  if (closest_skip_edge == NULL) {
+    // no found skip edge implies root
+    if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) {
+      link_edge(current_stored, previous);
+      return;
+    }
+    assert(current_stored->distance_to_root() == distance_to_skip_edge - 2, "invariant");
+    put_skip_edge(previous, reinterpret_cast<const Edge**>(&current_stored), distance_to_skip_edge - 2);
+    return;
+  }
+  assert(closest_skip_edge->is_skip_edge(), "invariant");
+  if (distance_to_skip_edge + previous_length <= EdgeUtils::leak_context) {
+    link_edge(current_stored, previous);
+    return;
+  }
+  // create a new skip edge with derived information from closest skip edge
+  (*previous)->set_skip_length(distance_to_skip_edge + closest_skip_edge->skip_length());
+  (*previous)->set_parent(closest_skip_edge->parent());
+}
+
+StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert(!contains((*current)->reference()), "invariant");
+  StoredEdge* const stored_edge = put((*current)->reference());
+  assert(stored_edge != NULL, "invariant");
+  link_edge(stored_edge, previous);
+  return stored_edge;
+}
+
+bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) {
+  assert(*previous != NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  size_t depth = 1;
+  while (*current != NULL && depth < limit) {
+    StoredEdge* stored_edge = get((*current)->reference());
+    if (stored_edge != NULL) {
+      link_with_existing_chain(stored_edge, previous, depth);
+      return true;
+    }
+    stored_edge = link_new_edge(previous, current);
+    assert((*previous)->parent() != NULL, "invariant");
+    *previous = stored_edge;
+    *current = (*current)->parent();
+    ++depth;
+  }
+  return NULL == *current;
+}
+
+// Install the immediate edge into the mark word of the leak candidate object
+StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
+  assert(edge != NULL, "invariant");
+  assert(!contains(edge->reference()), "invariant");
+  StoredEdge* const leak_context_edge = put(edge->reference());
+  oop sample_object = edge->pointee();
+  assert(sample_object != NULL, "invariant");
+  assert(NULL == sample_object->mark(), "invariant");
+  sample_object->set_mark(markOop(leak_context_edge));
+  return leak_context_edge;
+}
+
+/*
+ * The purpose of put_chain() is to reify the edge sequence
+ * discovered during heap traversal with a normalized logical copy.
+ * This copy consist of two sub-sequences and a connecting link (skip edge).
+ *
+ * "current" can be thought of as the cursor (search) edge, it is not in the edge store.
+ * "previous" is always an edge in the edge store.
+ * The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
+ */
+void EdgeStore::put_chain(const Edge* chain, size_t length) {
+  assert(chain != NULL, "invariant");
+  assert(chain->distance_to_root() + 1 == length, "invariant");
+  StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain);
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->parent() == NULL, "invariant");
+
+  if (1 == length) {
+    return;
+  }
+
+  const Edge* current = chain->parent();
+  assert(current != NULL, "invariant");
+  StoredEdge* previous = leak_context_edge;
+
+  // a leak context is the sequence of (limited) edges reachable from the leak candidate
+  if (put_edges(&previous, &current, EdgeUtils::leak_context)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+    return;
+  }
+
+  const size_t distance_to_root = length > EdgeUtils::leak_context ? length - 1 - EdgeUtils::leak_context : length - 1;
+  assert(current->distance_to_root() == distance_to_root, "invariant");
+
+  // a skip edge is the logical link
+  // connecting the leak context sequence with the root context sequence
+  if (put_skip_edge(&previous, &current, distance_to_root)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    assert(previous->is_skip_edge(), "invariant");
+    assert(previous->parent() != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent()));
+    return;
+  }
+
+  assert(current->distance_to_root() < EdgeUtils::root_context, "invariant");
+
+  // a root context is the sequence of (limited) edges reachable from the root
+  put_edges(&previous, &current, EdgeUtils::root_context);
+  assert(previous != NULL, "invariant");
+  put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+}
+
+void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(root != NULL, "invariant");
+  store_gc_root_id_in_leak_context_edge(leak_context_edge, root);
+  assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant");
+}
+
+// To avoid another traversal to resolve the root edge id later,
+// cache it in the immediate leak context edge for fast retrieval.
+void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->gc_root_id() == 0, "invariant");
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  assert(root->distance_to_root() == 0, "invariant");
+  const StoredEdge* const stored_root = static_cast<const StoredEdge*>(root);
+  traceid root_id = stored_root->gc_root_id();
+  if (root_id == 0) {
+    root_id = get_id(root);
+    stored_root->set_gc_root_id(root_id);
+  }
+  assert(root_id != 0, "invariant");
+  leak_context_edge->set_gc_root_id(root_id);
+  assert(leak_context_edge->gc_root_id() == stored_root->gc_root_id(), "invariant");
+}
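put_chain() is the heart of the rewrite: instead of storing every edge of an arbitrarily long reference chain and collapsing it afterwards (the removed edgeUtils.cpp machinery), the store keeps at most leak_context (100) edges nearest the leak candidate and root_context (100) edges nearest the GC root, joined by a single skip edge whose skip_length records how many interior edges were elided. Under that reading, the stored chain stays within max_ref_chain_depth while the logical depth it represents can be much larger, roughly as in this conceptual sketch:

    // Sketch: a skip edge's parent link stands in for skip_length elided
    // edges, so logical depth can exceed the number of stored edges.
    #include <cstddef>

    struct StoredEdgeSketch {
      const StoredEdgeSketch* parent;
      size_t skip_length;  // non-zero marks a skip edge
      bool is_skip_edge() const { return skip_length != 0; }
    };

    static size_t logical_depth(const StoredEdgeSketch* e) {
      size_t depth = 0;
      for (; e != nullptr; e = e->parent) {
        depth += e->is_skip_edge() ? e->skip_length : 1;
      }
      return depth;  // physical (stored) depth stays <= max_ref_chain_depth
    }
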
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,64 +25,40 @@
 #ifndef SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 #define SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 
+#include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
-#include "jfr/leakprofiler/chains/edge.hpp"
 #include "memory/allocation.hpp"
 
 typedef u8 traceid;
 
-class RoutableEdge : public Edge {
+class StoredEdge : public Edge {
  private:
-  mutable const RoutableEdge* _skip_edge;
-  mutable size_t _skip_length;
-  mutable bool _processed;
+  mutable traceid _gc_root_id;
+  size_t _skip_length;
 
  public:
-  RoutableEdge();
-  RoutableEdge(const Edge* parent, const oop* reference);
-  RoutableEdge(const Edge& edge);
-  RoutableEdge(const RoutableEdge& edge);
-  void operator=(const RoutableEdge& edge);
-
-  const RoutableEdge* skip_edge() const { return _skip_edge; }
-  size_t skip_length() const { return _skip_length; }
+  StoredEdge();
+  StoredEdge(const Edge* parent, const oop* reference);
+  StoredEdge(const Edge& edge);
+  StoredEdge(const StoredEdge& edge);
+  void operator=(const StoredEdge& edge);
 
-  bool is_skip_edge() const { return _skip_edge != NULL; }
-  bool processed() const { return _processed; }
-  bool is_sentinel() const {
-    return _skip_edge == NULL && _skip_length == 1;
-  }
-
-  void set_skip_edge(const RoutableEdge* edge) const {
-    assert(!is_skip_edge(), "invariant");
-    assert(edge != this, "invariant");
-    _skip_edge = edge;
-  }
+  traceid gc_root_id() const { return _gc_root_id; }
+  void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }
 
-  void set_skip_length(size_t length) const {
-    _skip_length = length;
-  }
-
-  void set_processed() const {
-    assert(!_processed, "invariant");
-    _processed = true;
-  }
+  bool is_skip_edge() const { return _skip_length != 0; }
+  size_t skip_length() const { return _skip_length; }
+  void set_skip_length(size_t length) { _skip_length = length; }
 
-  // true navigation according to physical tree representation
-  const RoutableEdge* physical_parent() const {
-    return static_cast<const RoutableEdge*>(parent());
-  }
+  void set_parent(const Edge* edge) { this->_parent = edge; }
 
-  // logical navigation taking skip levels into account
-  const RoutableEdge* logical_parent() const {
-    return is_skip_edge() ? skip_edge() : physical_parent();
+  StoredEdge* parent() const {
+    return const_cast<StoredEdge*>(static_cast<const StoredEdge*>(Edge::parent()));
   }
-
-  size_t logical_distance_to_root() const;
 };
 
 class EdgeStore : public CHeapObj<mtTracing> {
-  typedef HashTableHost<RoutableEdge, traceid, Entry, EdgeStore> EdgeHashTable;
+  typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
   typedef EdgeHashTable::HashEntry EdgeEntry;
   template <typename,
             typename,
@@ -90,6 +66,9 @@
             typename,
             size_t>
   friend class HashTableHost;
+  friend class EventEmitter;
+  friend class ObjectSampleWriter;
+  friend class ObjectSampleCheckpoint;
  private:
   static traceid _edge_id_counter;
   EdgeHashTable* _edges;
@@ -97,23 +76,33 @@
   // Hash table callbacks
   void assign_id(EdgeEntry* entry);
   bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
+  void unlink(EdgeEntry* entry);
 
-  const Edge* get_edge(const Edge* edge) const;
-  const Edge* put(const Edge* edge);
+  StoredEdge* get(const oop* reference) const;
+  StoredEdge* put(const oop* reference);
+  traceid gc_root_id(const Edge* edge) const;
+
+  bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
+  bool put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root);
+  void put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const;
+
+  StoredEdge* associate_leak_context_with_candidate(const Edge* edge);
+  void store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const;
+  StoredEdge* link_new_edge(StoredEdge** previous, const Edge** current);
+  void link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length);
+
+  template <typename T>
+  void iterate(T& functor) const { _edges->iterate_value<T>(functor); }
+
+  DEBUG_ONLY(bool contains(const oop* reference) const;)
 
  public:
   EdgeStore();
   ~EdgeStore();
 
-  void add_chain(const Edge* chain, size_t length);
   bool is_empty() const;
-  size_t number_of_entries() const;
-
   traceid get_id(const Edge* edge) const;
-  traceid get_root_id(const Edge* edge) const;
-
-  template <typename T>
-  void iterate_edges(T& functor) const { _edges->iterate_value<T>(functor); }
+  void put_chain(const Edge* chain, size_t length);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
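StoredEdge also carries a mutable _gc_root_id, so the GC root id is resolved once and cached on the leak-context edge (and on the root itself); subsequent calls to gc_root_id() return without re-walking the chain. The memoization shape, schematically:

    // Sketch: resolve once, cache on the edge; the field is mutable so
    // the cache can be filled through const pointers, as in StoredEdge.
    typedef unsigned long long traceid;

    struct CachedRootId {
      mutable traceid gc_root_id = 0;
    };

    template <typename ResolveFn>
    traceid root_id(const CachedRootId& e, ResolveFn resolve) {
      if (e.gc_root_id == 0) {
        e.gc_root_id = resolve();  // first lookup pays the traversal cost
      }
      return e.gc_root_id;
    }
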
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,11 +38,7 @@
   return (const Edge*)edge.pointee()->mark() == &edge;
 }
 
-bool EdgeUtils::is_root(const Edge& edge) {
-  return edge.is_root();
-}
-
-static int field_offset(const Edge& edge) {
+static int field_offset(const StoredEdge& edge) {
   assert(!edge.is_root(), "invariant");
   const oop ref_owner = edge.reference_owner();
   assert(ref_owner != NULL, "invariant");
@@ -56,7 +52,7 @@
   return offset;
 }
 
-static const InstanceKlass* field_type(const Edge& edge) {
+static const InstanceKlass* field_type(const StoredEdge& edge) {
   assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
   return (const InstanceKlass*)edge.reference_owner_klass();
 }
@@ -138,175 +134,18 @@
     current = parent;
     parent = current->parent();
   }
-  return current;
-}
-
-// The number of references associated with the leak node;
-// can be viewed as the leak node "context".
-// Used to provide leak context for a "capped/skipped" reference chain.
-static const size_t leak_context = 100;
-
-// The number of references associated with the root node;
-// can be viewed as the root node "context".
-// Used to provide root context for a "capped/skipped" reference chain.
-static const size_t root_context = 100;
-
-// A limit on the reference chain depth to be serialized,
-static const size_t max_ref_chain_depth = leak_context + root_context;
-
-const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
-  const RoutableEdge* current = &edge;
-  const RoutableEdge* parent = current->physical_parent();
-  size_t seek = 0;
-  while (parent != NULL && seek != skip_length) {
-    seek++;
-    current = parent;
-    parent = parent->physical_parent();
-  }
-  return current;
-}
-
-#ifdef ASSERT
-static void validate_skip_target(const RoutableEdge* skip_target) {
-  assert(skip_target != NULL, "invariant");
-  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
-  assert(skip_target->is_sentinel(), "invariant");
-}
-
-static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  if (last_skip_edge != NULL) {
-    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
-    validate_skip_target(target->logical_parent());
-    return;
-  }
-  assert(last_skip_edge == NULL, "invariant");
-  // only one level of logical indirection
-  validate_skip_target(new_skip_edge->logical_parent());
-}
-#endif // ASSERT
-
-static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->is_skip_edge(), "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
-  assert(skip_target != NULL, "invariant");
-  new_skip_edge->set_skip_edge(skip_target);
-  new_skip_edge->set_skip_length(skip_target_distance);
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
-}
-
-static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
+  assert(current != NULL, "invariant");
   return current;
 }
 
-static void collapse_overlapping_chain(const RoutableEdge& edge,
-                                       const RoutableEdge* first_processed_edge,
-                                       size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  // first_processed_edge is already processed / written
-  assert(first_processed_edge->processed(), "invariant");
-  assert(first_processed_distance + 1 <= leak_context, "invariant");
-
-  // from this first processed edge, attempt to fetch the last skip edge
-  size_t last_skip_edge_distance = 0;
-  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
-  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
-
-  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
-    // complete chain can be accommodated without modification
-    return;
-  }
-
-  // backtrack one edge from existing processed edge
-  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  assert(new_skip_edge->parent() == first_processed_edge, "invariant");
-
-  size_t adjustment = 0;
-  if (last_skip_edge != NULL) {
-    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
-    adjustment = leak_context - first_processed_distance - 1;
-    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
-  } else {
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
-    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
+const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) {
+  const Edge* current = &edge;
+  const Edge* parent = current->parent();
+  size_t seek = 0;
+  while (parent != NULL && seek != distance) {
+    seek++;
+    current = parent;
+    parent = parent->parent();
   }
-
-  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
-}
-
-static void collapse_non_overlapping_chain(const RoutableEdge& edge,
-                                           const RoutableEdge* first_processed_edge,
-                                           size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  assert(!first_processed_edge->processed(), "invariant");
-  // this implies that the first "processed" edge is the leak context relative "leaf"
-  assert(first_processed_distance + 1 == leak_context, "invariant");
-
-  const size_t distance_to_root = edge.distance_to_root();
-  if (distance_to_root + 1 <= max_ref_chain_depth) {
-    // complete chain can be accommodated without constructing a skip edge
-    return;
-  }
-
-  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
-  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
-
-  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
-}
-
-static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL && distance < leak_context - 1) {
-    if (current->processed()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
-  assert(distance <= leak_context - 1, "invariant");
   return current;
 }
-
-/*
- * Some vocabulary:
- * -----------
- * "Context" is an interval in the chain, it is associcated with an edge and it signifies a number of connected edges.
- * "Processed / written" means an edge that has already been serialized.
- * "Skip edge" is an edge that contains additional information for logical routing purposes.
- * "Skip target" is an edge used as a destination for a skip edge
- */
-void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
-  assert(is_leak_edge(edge), "invariant");
-
-  // attempt to locate an already processed edge inside current leak context (if any)
-  size_t first_processed_distance = 0;
-  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
-  if (first_processed_edge == NULL) {
-    return;
-  }
-
-  if (first_processed_edge->processed()) {
-    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  } else {
-    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  }
-
-  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
-}
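The removed collapse machinery (skip_to, install_logical_route, the collapse_* functions) is superseded by EdgeStore::put_chain(); the one primitive EdgeUtils keeps is the generic ancestor() walk added above, which put_skip_edge() uses to locate the target of a new skip edge. It is the classic bounded parent walk:

    // Sketch: follow up to `distance` parent links, stopping at the root.
    #include <cstddef>

    template <typename Node>
    const Node* ancestor(const Node* node, size_t distance) {
      size_t seek = 0;
      while (node->parent != nullptr && seek != distance) {
        node = node->parent;
        ++seek;
      }
      return node;  // the root if the chain is shorter than `distance`
    }
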
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,15 +28,17 @@
 #include "memory/allocation.hpp"
 
 class Edge;
-class RoutableEdge;
 class Symbol;
 
 class EdgeUtils : public AllStatic {
  public:
-  static bool is_leak_edge(const Edge& edge);
+  static const size_t leak_context = 100;
+  static const size_t root_context = 100;
+  static const size_t max_ref_chain_depth = leak_context + root_context;
 
+  static bool is_leak_edge(const Edge& edge);
   static const Edge* root(const Edge& edge);
-  static bool is_root(const Edge& edge);
+  static const Edge* ancestor(const Edge& edge, size_t distance);
 
   static bool is_array_element(const Edge& edge);
   static int array_index(const Edge& edge);
@@ -44,8 +46,6 @@
 
   static const Symbol* field_name_symbol(const Edge& edge);
   static jshort field_modifiers(const Edge& edge);
-
-  static void collapse_chain(const RoutableEdge& edge);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+#include "logging/log.hpp"
+#include "memory/universe.hpp"
+
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
+  _sampler(sampler),_edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all) {}
+
+/* The EdgeQueue is backed by directly managed virtual memory.
+ * We will attempt to dimension an initial reservation
+ * in proportion to the size of the heap (represented by heap_region).
+ * Initial memory reservation: 5% of the heap OR at least 32 Mb
+ * Commit ratio: 1 : 10 (subject to allocation granularties)
+ */
+static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
+  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
+  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
+  return memory_reservation_bytes;
+}
+
+static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
+  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
+  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
+  return memory_commit_block_size_bytes;
+}
+
+static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
+  log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
+  log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
+  log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
+  if (edge_queue.reserved_size() > 0) {
+    log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
+      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
+  }
+}
+
+void PathToGcRootsOperation::doit() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_cutoff_ticks > 0, "invariant");
+
+  // The bitset used for marking is dimensioned as a function of the heap size
+  const MemRegion heap_region = Universe::heap()->reserved_region();
+  BitSet mark_bits(heap_region);
+
+  // The edge queue is dimensioned as a fraction of the heap size
+  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
+  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
+
+  // The initialize() routines will attempt to reserve and allocate backing storage memory.
+  // Failure to accommodate will render root chain processing impossible.
+  // As a fallback on failure, just write out the existing samples, flat, without chains.
+  if (!(mark_bits.initialize() && edge_queue.initialize())) {
+    log_warning(jfr)("Unable to allocate memory for root chain processing");
+    return;
+  }
+
+  // Save the original markWord for the potential leak objects,
+  // to be restored on function exit
+  ObjectSampleMarker marker;
+  if (ObjectSampleCheckpoint::save_mark_words(_sampler, marker, _emit_all) == 0) {
+    // no valid samples to process
+    return;
+  }
+
+  // Necessary condition for attempting a root set iteration
+  Universe::heap()->ensure_parsability(false);
+
+  BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
+  RootSetClosure<BFSClosure> roots(&bfs);
+
+  GranularTimer::start(_cutoff_ticks, 1000000);
+  roots.process();
+  if (edge_queue.is_full()) {
+    // Pathological case where roots don't fit in queue
+    // Do a depth-first search, but mark roots first
+    // to avoid walking sideways over roots
+    DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
+  } else {
+    bfs.process();
+  }
+  GranularTimer::stop();
+  log_edge_queue_summary(edge_queue);
+
+  // Emit old objects including their reference chains as events
+  EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
+  emitter.write_events(_sampler, _edge_store, _emit_all);
+}
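The sizing comments above reduce to simple arithmetic: reserve max(heap / 20, 32 MB) of virtual memory for the edge queue and commit it in tenths. For an 8 GB heap that is roughly 410 MB reserved with ~41 MB commit blocks; for heaps of 640 MB or less the 32 MB floor applies. As a worked sketch:

    // Sketch of the reservation arithmetic (values in bytes).
    #include <algorithm>
    #include <cstddef>

    static const size_t M = 1024 * 1024;
    static const size_t G = 1024 * M;

    static size_t reservation_bytes(size_t heap_bytes) {
      return std::max(heap_bytes / 20, (size_t)32 * M);  // 5% of heap, 32 MB floor
    }

    static size_t commit_block_bytes(size_t reservation) {
      return reservation / 10;  // 1:10 commit ratio
    }

    // reservation_bytes(8 * G)   -> ~410 MB, committed in ~41 MB blocks
    // reservation_bytes(512 * M) -> 32 MB floor, ~3.2 MB blocks
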
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
+
+class EdgeStore;
+class ObjectSampler;
+
+// Safepoint operation for finding paths to gc roots
+class PathToGcRootsOperation : public OldObjectVMOperation {
+ private:
+  ObjectSampler* _sampler;
+  EdgeStore* const _edge_store;
+  const int64_t _cutoff_ticks;
+  const bool _emit_all;
+
+ public:
+  PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all);
+  virtual void doit();
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
--- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,27 +28,26 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edgeQueue.hpp"
 #include "jfr/leakprofiler/chains/rootSetClosure.hpp"
-#include "jfr/leakprofiler/utilities/saveRestore.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.hpp"
 #include "services/management.hpp"
 #include "utilities/align.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
 
-RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
-  _edge_queue(edge_queue) {
-}
+template <typename Delegate>
+RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegate) {}
 
-void RootSetClosure::do_oop(oop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(oop* ref) {
   assert(ref != NULL, "invariant");
   // We discard unaligned root references because
   // our reference tagging scheme will use
@@ -62,50 +61,39 @@
   }
 
   assert(is_aligned(ref, HeapWordSize), "invariant");
-  const oop pointee = *ref;
-  if (pointee != NULL) {
-    closure_impl(ref, pointee);
+  if (*ref != NULL) {
+    _delegate->do_root(ref);
   }
 }
 
-void RootSetClosure::do_oop(narrowOop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
   assert(ref != NULL, "invariant");
   assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
   const oop pointee = RawAccess<>::oop_load(ref);
   if (pointee != NULL) {
-    closure_impl(UnifiedOop::encode(ref), pointee);
-  }
-}
-
-void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
-  if (!_edge_queue->is_full())  {
-    _edge_queue->add(NULL, reference);
+    _delegate->do_root(UnifiedOop::encode(ref));
   }
 }
 
-void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
-  RootSetClosure rs(edge_queue);
-  process_roots(&rs);
+class RootSetClosureMarkScope : public MarkScope {};
+
+template <typename Delegate>
+void RootSetClosure<Delegate>::process() {
+  RootSetClosureMarkScope mark_scope;
+  CLDToOopClosure cldt_closure(this, ClassLoaderData::_claim_none);
+  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
+  CodeBlobToOopClosure blobs(this, false);
+  Threads::oops_do(this, &blobs);
+  ObjectSynchronizer::oops_do(this);
+  Universe::oops_do(this);
+  JNIHandles::oops_do(this);
+  JvmtiExport::oops_do(this);
+  SystemDictionary::oops_do(this);
+  Management::oops_do(this);
+  StringTable::oops_do(this);
+  AOTLoader::oops_do(this);
 }
 
-class RootSetClosureMarkScope : public MarkScope {
-};
-
-void RootSetClosure::process_roots(OopClosure* closure) {
-  SaveRestoreCLDClaimBits save_restore_cld_claim_bits;
-  RootSetClosureMarkScope mark_scope;
-
-  CLDToOopClosure cldt_closure(closure, ClassLoaderData::_claim_strong);
-  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
-  CodeBlobToOopClosure blobs(closure, false);
-  Threads::oops_do(closure, &blobs);
-  ObjectSynchronizer::oops_do(closure);
-  Universe::oops_do(closure);
-  JNIHandles::oops_do(closure);
-  JvmtiExport::oops_do(closure);
-  SystemDictionary::oops_do(closure);
-  Management::oops_do(closure);
-  StringTable::oops_do(closure);
-  AOTLoader::oops_do(closure);
-  JVMCI_ONLY(JVMCI::oops_do(closure);)
-}
+template class RootSetClosure<BFSClosure>;
+template class RootSetClosure<DFSClosure>;
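Templating RootSetClosure on its Delegate replaces the previous virtual-call-plus-queue design with statically bound dispatch, and the two explicit instantiations at the end of the translation unit keep the template definitions out of the header. A self-contained sketch of the pattern, with invented names standing in for the HotSpot types:

#include <iostream>

// Delegate-templated closure: do_oop() forwards to the delegate with no
// virtual dispatch; explicit instantiation at the end of the .cpp keeps
// the template definitions out of the header, mirroring rootSetClosure.cpp.
template <typename Delegate>
class Closure {
 private:
  Delegate* const _delegate;
 public:
  explicit Closure(Delegate* delegate) : _delegate(delegate) {}
  void do_oop(const void* ref) {
    if (ref != nullptr) {
      _delegate->do_root(ref);  // statically bound call
    }
  }
};

struct BreadthFirst {
  void do_root(const void* ref) { std::cout << "bfs root " << ref << '\n'; }
};

struct DepthFirst {
  void do_root(const void* ref) { std::cout << "dfs root " << ref << '\n'; }
};

// Explicit instantiations, as for RootSetClosure<BFSClosure> / <DFSClosure>.
template class Closure<BreadthFirst>;
template class Closure<DepthFirst>;

int main() {
  BreadthFirst bfs;
  Closure<BreadthFirst> c(&bfs);
  int dummy = 0;
  c.do_oop(&dummy);
  return 0;
}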
--- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -26,18 +26,14 @@
 #define SHARE_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
-class EdgeQueue;
-
+template <typename Delegate>
 class RootSetClosure: public BasicOopIterateClosure {
  private:
-  RootSetClosure(EdgeQueue* edge_queue);
-  EdgeQueue* _edge_queue;
-  void closure_impl(const oop* reference, const oop pointee);
+  Delegate* const _delegate;
  public:
-  static void add_to_queue(EdgeQueue* edge_queue);
-  static void process_roots(OopClosure* closure);
+  RootSetClosure(Delegate* delegate);
+  void process();
 
   virtual void do_oop(oop* reference);
   virtual void do_oop(narrowOop* reference);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/vmThread.hpp"
+
+EventEmitter::EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time) :
+  _start_time(start_time),
+  _end_time(end_time),
+  _thread(Thread::current()),
+  _jfr_thread_local(_thread->jfr_thread_local()),
+  _thread_id(_thread->jfr_thread_local()->thread_id()) {}
+
+EventEmitter::~EventEmitter() {
+  // restore / reset thread local stack trace and thread id
+  _jfr_thread_local->set_thread_id(_thread_id);
+  _jfr_thread_local->clear_cached_stack_trace();
+}
+
+void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all) {
+  assert(sampler != NULL, "invariant");
+
+  ResourceMark rm;
+  EdgeStore edge_store;
+  if (cutoff_ticks <= 0) {
+    // no reference chains
+    JfrTicks time_stamp = JfrTicks::now();
+    EventEmitter emitter(time_stamp, time_stamp);
+    emitter.write_events(sampler, &edge_store, emit_all);
+    return;
+  }
+  // events emitted with reference chains require a safepoint operation
+  PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all);
+  VMThread::execute(&op);
+}
+
+size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) {
+  assert(_thread == Thread::current(), "invariant");
+  assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant");
+  assert(object_sampler != NULL, "invariant");
+  assert(edge_store != NULL, "invariant");
+
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  size_t count = 0;
+
+  const ObjectSample* current = object_sampler->first();
+  while (current != NULL) {
+    ObjectSample* prev = current->prev();
+    if (current->is_alive_and_older_than(last_sweep)) {
+      write_event(current, edge_store);
+      ++count;
+    }
+    current = prev;
+  }
+
+  if (count > 0) {
+    // serialize associated checkpoints and potential chains
+    ObjectSampleCheckpoint::write(object_sampler, edge_store, emit_all, _thread);
+  }
+  return count;
+}
+
+static int array_size(const oop object) {
+  assert(object != NULL, "invariant");
+  if (object->is_array()) {
+    return arrayOop(object)->length();
+  }
+  return min_jint;
+}
+
+void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
+  assert(sample != NULL, "invariant");
+  assert(!sample->is_dead(), "invariant");
+  assert(edge_store != NULL, "invariant");
+  assert(_jfr_thread_local != NULL, "invariant");
+
+  const oop* object_addr = sample->object_addr();
+  traceid gc_root_id = 0;
+  const Edge* edge = NULL;
+  if (SafepointSynchronize::is_at_safepoint()) {
+    edge = (const Edge*)(*object_addr)->mark();
+  }
+  if (edge == NULL) {
+    // In order to dump out a representation of the object,
+    // even though it was unreachable (or the path was too long to traverse),
+    // we need to register a top-level edge for this object.
+    edge = edge_store->put(object_addr);
+  } else {
+    gc_root_id = edge_store->gc_root_id(edge);
+  }
+
+  assert(edge != NULL, "invariant");
+  const traceid object_id = edge_store->get_id(edge);
+  assert(object_id != 0, "invariant");
+
+  EventOldObjectSample e(UNTIMED);
+  e.set_starttime(_start_time);
+  e.set_endtime(_end_time);
+  e.set_allocationTime(sample->allocation_time());
+  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
+  e.set_object(object_id);
+  e.set_arrayElements(array_size(edge->pointee()));
+  e.set_root(gc_root_id);
+
+  // Temporarily assign both the stack trace id and the thread id
+  // to the thread-local data structure of the emitter thread (for the duration
+  // of the commit() call). This trick lets us override
+  // the event generation mechanism by injecting externally provided ids.
+  // At this particular location, it allows us to emit an old object event
+  // that carries information from where the actual sampling occurred.
+  _jfr_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
+  assert(sample->has_thread(), "invariant");
+  _jfr_thread_local->set_thread_id(sample->thread_id());
+  e.commit();
+}
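The emitter's constructor snapshots the thread id and its destructor restores it and clears the cached stack trace, so ids injected for a single commit() cannot leak past the emitter's lifetime. A standalone sketch of that RAII save/restore discipline; ThreadLocalIds and IdOverrideScope are invented stand-ins for JfrThreadLocal and EventEmitter:

#include <cassert>
#include <cstdint>

// Hypothetical thread-local record, standing in for JfrThreadLocal.
struct ThreadLocalIds {
  uint64_t thread_id = 0;
  uint64_t cached_stack_trace_id = 0;
};

// RAII guard mirroring EventEmitter: snapshot on entry, restore on exit,
// so any ids injected for one commit() cannot outlive the emitter.
class IdOverrideScope {
 private:
  ThreadLocalIds* const _tl;
  const uint64_t _saved_thread_id;
 public:
  explicit IdOverrideScope(ThreadLocalIds* tl)
    : _tl(tl), _saved_thread_id(tl->thread_id) {}
  ~IdOverrideScope() {
    _tl->thread_id = _saved_thread_id;   // restore, as in ~EventEmitter()
    _tl->cached_stack_trace_id = 0;      // clear the cached stack trace
  }
  void inject(uint64_t thread_id, uint64_t stack_trace_id) {
    _tl->thread_id = thread_id;
    _tl->cached_stack_trace_id = stack_trace_id;
  }
};

int main() {
  ThreadLocalIds tl;
  tl.thread_id = 42;
  {
    IdOverrideScope scope(&tl);
    scope.inject(7, 1001);  // ids from where the sampling occurred
    // ... commit() would read the injected ids here ...
  }
  assert(tl.thread_id == 42 && tl.cached_stack_trace_id == 0);
  return 0;
}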
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+
+typedef u8 traceid;
+
+class EdgeStore;
+class JfrThreadLocal;
+class ObjectSample;
+class ObjectSampler;
+class Thread;
+
+class EventEmitter : public CHeapObj<mtTracing> {
+  friend class LeakProfiler;
+  friend class PathToGcRootsOperation;
+ private:
+  const JfrTicks& _start_time;
+  const JfrTicks& _end_time;
+  Thread* _thread;
+  JfrThreadLocal* _jfr_thread_local;
+  traceid _thread_id;
+
+  EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time);
+  ~EventEmitter();
+
+  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
+  size_t write_events(ObjectSampler* sampler, EdgeStore* store, bool emit_all);
+
+  static void emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all);
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -24,10 +24,6 @@
 
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
-#include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
-#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
 #include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
@@ -37,12 +33,129 @@
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/metadata/jfrSerializer.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.inline.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/growableArray.hpp"
+
+static bool predicate(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  bool found = false;
+  set->find_sorted<traceid, compare_traceid>(id, found);
+  return found;
+}
+
+static bool mutable_predicate(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  bool found = false;
+  const int location = set->find_sorted<traceid, compare_traceid>(id, found);
+  if (!found) {
+    set->insert_before(location, id);
+  }
+  return found;
+}
+
+static bool add(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  return mutable_predicate(set, id);
+}
+
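In the helpers above, predicate() is a read-only membership test against a sorted array, while mutable_predicate() doubles as insert-if-absent: it reports prior membership and, when the id was missing, inserts it at the position the binary search produced. A sketch of the same idiom over std::vector, assuming only the standard library:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

typedef uint64_t traceid;

// Read-only membership test on a sorted vector.
static bool predicate(const std::vector<traceid>& set, traceid id) {
  return std::binary_search(set.begin(), set.end(), id);
}

// Membership test that also inserts missing ids at their sorted position,
// mirroring mutable_predicate(): returns whether the id was already present.
static bool mutable_predicate(std::vector<traceid>& set, traceid id) {
  std::vector<traceid>::iterator it =
      std::lower_bound(set.begin(), set.end(), id);
  const bool found = (it != set.end() && *it == id);
  if (!found) {
    set.insert(it, id);
  }
  return found;
}

int main() {
  std::vector<traceid> ids;
  assert(!mutable_predicate(ids, 5));  // first sighting: inserted
  assert(mutable_predicate(ids, 5));   // second sighting: deduplicated
  assert(predicate(ids, 5) && !predicate(ids, 6));
  return 0;
}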
+const int initial_array_size = 256;
+
+template <typename T>
+static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
+  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
+}
+
+template <typename T>
+static GrowableArray<T>* resource_allocate_array(int size = initial_array_size) {
+  return new GrowableArray<T>(size);
+}
+
+static void sort_array(GrowableArray<traceid>* ar) {
+  assert(ar != NULL, "invariant");
+  ar->sort(sort_traceid);
+}
+
+static GrowableArray<traceid>* unloaded_thread_id_set = NULL;
+
+class ThreadIdExclusiveAccess : public StackObj {
+ private:
+  static Semaphore _mutex_semaphore;
+ public:
+  ThreadIdExclusiveAccess() { _mutex_semaphore.wait(); }
+  ~ThreadIdExclusiveAccess() { _mutex_semaphore.signal(); }
+};
+
+Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);
 
-template <typename SampleProcessor>
-static void do_samples(ObjectSample* sample, const ObjectSample* const end, SampleProcessor& processor) {
+static void add_to_unloaded_thread_set(traceid tid) {
+  ThreadIdExclusiveAccess lock;
+  if (unloaded_thread_id_set == NULL) {
+    unloaded_thread_id_set = c_heap_allocate_array<traceid>();
+  }
+  add(unloaded_thread_id_set, tid);
+}
+
+static bool has_thread_exited(traceid tid) {
+  assert(tid != 0, "invariant");
+  return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
+}
+
+static GrowableArray<traceid>* unloaded_set = NULL;
+
+static void sort_unloaded_set() {
+  if (unloaded_set != NULL) {
+    sort_array(unloaded_set);
+  }
+}
+
+static void add_to_unloaded_set(traceid klass_id) {
+  if (unloaded_set == NULL) {
+    unloaded_set = c_heap_allocate_array<traceid>();
+  }
+  unloaded_set->append(klass_id);
+}
+
+void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
+  assert(k != NULL, "invariant");
+  add_to_unloaded_set(TRACE_ID(k));
+}
+
+static bool is_klass_unloaded(traceid klass_id) {
+  return unloaded_set != NULL && predicate(unloaded_set, klass_id);
+}
+
+static GrowableArray<traceid>* id_set = NULL;
+static GrowableArray<traceid>* stack_trace_id_set = NULL;
+
+static bool is_processed(traceid id) {
+  assert(id != 0, "invariant");
+  assert(id_set != NULL, "invariant");
+  return mutable_predicate(id_set, id);
+}
+
+static bool is_processed_or_unloaded(traceid klass_id) {
+  assert(klass_id != 0, "invariant");
+  return is_processed(klass_id) || is_klass_unloaded(klass_id);
+}
+
+static bool should_process(traceid klass_id) {
+  return klass_id != 0 && !is_processed_or_unloaded(klass_id);
+}
+
+static bool is_stack_trace_processed(traceid stack_trace_id) {
+  assert(stack_trace_id != 0, "invariant");
+  assert(stack_trace_id_set != NULL, "invariant");
+  return mutable_predicate(stack_trace_id_set, stack_trace_id);
+}
+
+template <typename Processor>
+static void do_samples(ObjectSample* sample, const ObjectSample* const end, Processor& processor) {
   assert(sample != NULL, "invariant");
   while (sample != end) {
     processor.sample_do(sample);
@@ -50,6 +163,298 @@
   }
 }
 
+template <typename Processor>
+static void iterate_samples(Processor& processor, bool all = false, bool update_last_resolved = false) {
+  ObjectSampler* const sampler = ObjectSampler::sampler();
+  assert(sampler != NULL, "invariant");
+  ObjectSample* const last = sampler->last();
+  assert(last != NULL, "invariant");
+  do_samples(last, all ? NULL : sampler->last_resolved(), processor);
+  if (update_last_resolved) {
+    sampler->set_last_resolved(last);
+  }
+}
+
+void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  if (LeakProfiler::is_running()) {
+    add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id());
+  }
+}
+
+class CheckpointInstall {
+ private:
+  const JfrCheckpointBlobHandle& _cp;
+ public:
+  CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {}
+  void sample_do(ObjectSample* sample) {
+    assert(sample != NULL, "invariant");
+    if (!sample->is_dead()) {
+      sample->set_klass_checkpoint(_cp);
+    }
+  }
+};
+
+static void install_blob(JfrCheckpointWriter& writer) {
+  assert(writer.has_data(), "invariant");
+  const JfrCheckpointBlobHandle h_cp = writer.copy();
+  CheckpointInstall install(h_cp);
+  iterate_samples(install, true, false);
+}
+
+void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
+    install_blob(writer);
+  }
+}
+
+class ObjectResolver {
+ public:
+  ObjectResolver() {}
+  void sample_do(ObjectSample* sample) {
+    assert(sample != NULL, "invariant");
+    const traceid klass_id = sample->_klass_id;
+    if (klass_id != 0 || sample->is_dead() || is_klass_unloaded(klass_id)) {
+      return;
+    }
+    sample->_klass_id = JfrTraceId::use(sample->klass());
+  }
+};
+
+void ObjectSampleCheckpoint::resolve_sampled_objects() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  if (ObjectSampler::sampler()->last() == NULL) {
+    return;
+  }
+  ObjectResolver resolver;
+  iterate_samples(resolver, false, true);
+}
+
+class SampleMark {
+ private:
+  ObjectSampleMarker& _marker;
+  jlong _last_sweep;
+  int _count;
+ public:
+  SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), _last_sweep(last_sweep), _count(0) {}
+  void sample_do(ObjectSample* sample) {
+    assert(sample != NULL, "invariant");
+    if (sample->is_alive_and_older_than(_last_sweep)) {
+      _marker.mark(sample->object());
+      ++_count;
+    }
+  }
+  int count() const {
+    return _count;
+  }
+};
+
+int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) {
+  assert(sampler != NULL, "invariant");
+  if (sampler->last() == NULL) {
+    return 0;
+  }
+  SampleMark mark(marker, emit_all ? max_jlong : sampler->last_sweep().value());
+  iterate_samples(mark, true, false);
+  return mark.count();
+}
+
+void ObjectSampleCheckpoint::tag(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  const traceid klass_id = sample->_klass_id;
+  if (should_process(klass_id)) {
+    JfrTraceId::use(sample->klass());
+  }
+}
+
+#ifdef ASSERT
+static traceid get_klass_id(const Klass* k) {
+  assert(k != NULL, "invariant");
+  return TRACE_ID(k);
+}
+#endif
+
+static traceid get_klass_id(traceid method_id) {
+  assert(method_id != 0, "invariant");
+  return method_id >> TRACE_ID_SHIFT;
+}
+
+static int get_method_id_num(traceid method_id) {
+  return (int)(method_id & METHOD_ID_NUM_MASK);
+}
+
+static Method* lookup_method_in_klasses(Klass* klass, int orig_method_id_num) {
+  assert(klass != NULL, "invariant");
+  assert(!is_klass_unloaded(get_klass_id(klass)), "invariant");
+  while (klass != NULL) {
+    if (klass->is_instance_klass()) {
+      Method* const m = InstanceKlass::cast(klass)->method_with_orig_idnum(orig_method_id_num);
+      if (m != NULL) {
+        return m;
+      }
+    }
+    klass = klass->super();
+  }
+  return NULL;
+}
+
+static Method* lookup_method_in_interfaces(Klass* klass, int orig_method_id_num) {
+  assert(klass != NULL, "invariant");
+  const Array<InstanceKlass*>* const all_ifs = InstanceKlass::cast(klass)->transitive_interfaces();
+  const int num_ifs = all_ifs->length();
+  for (int i = 0; i < num_ifs; i++) {
+    InstanceKlass* const ik = all_ifs->at(i);
+    Method* const m = ik->method_with_orig_idnum(orig_method_id_num);
+    if (m != NULL) {
+      return m;
+    }
+  }
+  return NULL;
+}
+
+static Method* lookup_method(Klass* klass, int orig_method_id_num) {
+  Method* m = lookup_method_in_klasses(klass, orig_method_id_num);
+  if (m == NULL) {
+    m = lookup_method_in_interfaces(klass, orig_method_id_num);
+  }
+  assert(m != NULL, "invariant");
+  return m;
+}
+
+static void write_stack_trace(traceid id, bool reached_root, u4 nr_of_frames, JfrCheckpointWriter* writer) {
+  assert(writer != NULL, "invariant");
+  writer->write(id);
+  writer->write((u1)!reached_root);
+  writer->write(nr_of_frames);
+}
+
+static void write_stack_frame(const JfrStackFrame* frame, JfrCheckpointWriter* writer) {
+  assert(frame != NULL, "invariant");
+  frame->write(*writer);
+}
+
+bool ObjectSampleCheckpoint::tag(const JfrStackTrace* trace, JfrCheckpointWriter* writer /* NULL */) {
+  assert(trace != NULL, "invariant");
+  if (is_stack_trace_processed(trace->id())) {
+    return false;
+  }
+  if (writer != NULL) {
+    // JfrStackTrace
+    write_stack_trace(trace->id(), trace->_reached_root, trace->_nr_of_frames, writer);
+  }
+  traceid last_id = 0;
+  for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
+    if (writer != NULL) {
+      // JfrStackFrame(s)
+      write_stack_frame(&trace->_frames[i], writer);
+    }
+    const traceid method_id = trace->_frames[i]._methodid;
+    if (last_id == method_id || is_processed(method_id) || is_klass_unloaded(get_klass_id(method_id))) {
+      continue;
+    }
+    last_id = method_id;
+    InstanceKlass* const ik = trace->_frames[i]._klass;
+    assert(ik != NULL, "invariant");
+    JfrTraceId::use(ik, lookup_method(ik, get_method_id_num(method_id)));
+  }
+  return true;
+}
+
+static bool stack_trace_precondition(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  return sample->has_stack_trace_id() && !sample->is_dead();
+}
+
+class Tagger {
+ private:
+  JfrStackTraceRepository& _stack_trace_repo;
+ public:
+  Tagger(JfrStackTraceRepository& stack_trace_repo) : _stack_trace_repo(stack_trace_repo) {}
+  void sample_do(ObjectSample* sample) {
+    ObjectSampleCheckpoint::tag(sample);
+    if (stack_trace_precondition(sample)) {
+      assert(sample->stack_trace_id() == sample->stack_trace()->id(), "invariant");
+      ObjectSampleCheckpoint::tag(sample->stack_trace(), NULL);
+    }
+  }
+};
+
+static void tag_old_traces(ObjectSample* last_resolved, JfrStackTraceRepository& stack_trace_repo) {
+  assert(last_resolved != NULL, "invariant");
+  assert(stack_trace_id_set != NULL, "invariant");
+  assert(stack_trace_id_set->is_empty(), "invariant");
+  Tagger tagger(stack_trace_repo);
+  do_samples(last_resolved, NULL, tagger);
+}
+
+class StackTraceInstall {
+ private:
+  JfrStackTraceRepository& _stack_trace_repo;
+ public:
+  StackTraceInstall(JfrStackTraceRepository& stack_trace_repo) : _stack_trace_repo(stack_trace_repo) {}
+  void install_to_sample(ObjectSample* sample, const JfrStackTrace* stack_trace);
+  void sample_do(ObjectSample* sample) {
+    ObjectSampleCheckpoint::tag(sample);
+    if (stack_trace_precondition(sample)) {
+      install_to_sample(sample, _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id()));
+    }
+  }
+};
+
+#ifdef ASSERT
+static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* trace) {
+  assert(sample != NULL, "invariant");
+  assert(trace != NULL, "invariant");
+  assert(trace->hash() == sample->stack_trace_hash(), "invariant");
+  assert(trace->id() == sample->stack_trace_id(), "invariant");
+}
+#endif
+
+void StackTraceInstall::install_to_sample(ObjectSample* sample, const JfrStackTrace* stack_trace) {
+  assert(sample != NULL, "invariant");
+  assert(stack_trace != NULL, "invariant");
+  DEBUG_ONLY(validate_stack_trace(sample, stack_trace));
+  JfrStackTrace* const sample_trace = const_cast<JfrStackTrace*>(sample->stack_trace());
+  if (sample_trace != NULL) {
+    *sample_trace = *stack_trace; // copy
+  } else {
+    sample->set_stack_trace(new JfrStackTrace(stack_trace->id(), *stack_trace, NULL)); // new
+  }
+  assert(sample->stack_trace() != NULL, "invariant");
+}
+
+static void install_new_stack_traces(JfrStackTraceRepository& stack_trace_repo) {
+  StackTraceInstall stack_trace_install(stack_trace_repo);
+  iterate_samples(stack_trace_install);
+  stack_trace_id_set->clear();
+}
+
+static void allocate_traceid_working_sets() {
+  const int set_size = JfrOptionSet::old_object_queue_size();
+  stack_trace_id_set = resource_allocate_array<traceid>(set_size);
+  id_set = resource_allocate_array<traceid>(set_size);
+  sort_unloaded_set();
+}
+
+// caller needs ResourceMark
+void ObjectSampleCheckpoint::rotate(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
+  assert(sampler != NULL, "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  if (sampler->last() == NULL) {
+    // nothing to process
+    return;
+  }
+  allocate_traceid_working_sets();
+  install_new_stack_traces(stack_trace_repo);
+  ObjectSample* const last_resolved = const_cast<ObjectSample*>(sampler->last_resolved());
+  if (last_resolved != NULL) {
+    tag_old_traces(last_resolved, stack_trace_repo);
+  }
+}
+
 class RootSystemType : public JfrSerializer {
  public:
   void serialize(JfrCheckpointWriter& writer) {
@@ -74,247 +479,138 @@
   }
 };
 
-class CheckpointInstall {
- private:
-  const JfrCheckpointBlobHandle& _cp;
- public:
-  CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {}
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (!sample->is_dead()) {
-      sample->set_klass_checkpoint(_cp);
-    }
+static void register_serializers() {
+  static bool is_registered = false;
+  if (!is_registered) {
+    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, true, new RootSystemType());
+    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, true, new RootType());
+    is_registered = true;
+  }
+}
+
+static void reset_blob_write_state(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  if (sample->has_thread_checkpoint()) {
+    sample->thread_checkpoint()->reset_write_state();
+  }
+  if (sample->has_klass_checkpoint()) {
+    sample->klass_checkpoint()->reset_write_state();
   }
-};
+}
+
+static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
+  if (sample->has_thread_checkpoint() && has_thread_exited(sample->thread_id())) {
+    sample->thread_checkpoint()->exclusive_write(writer);
+  }
+}
+
+static void write_klass_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
+  if (sample->has_klass_checkpoint()) {
+    sample->klass_checkpoint()->exclusive_write(writer);
+  }
+}
+
+static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer) {
+  assert(sample != NULL, "invariant");
+  write_thread_blob(sample, writer);
+  write_klass_blob(sample, writer);
+}
 
 class CheckpointWrite {
  private:
+  const ObjectSampler* _sampler;
   JfrCheckpointWriter& _writer;
   const jlong _last_sweep;
  public:
-  CheckpointWrite(JfrCheckpointWriter& writer, jlong last_sweep) : _writer(writer), _last_sweep(last_sweep) {}
+  CheckpointWrite(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
+    _sampler(sampler), _writer(writer), _last_sweep(last_sweep) {}
   void sample_do(ObjectSample* sample) {
     assert(sample != NULL, "invariant");
     if (sample->is_alive_and_older_than(_last_sweep)) {
-      if (sample->has_thread_checkpoint()) {
-        const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
-        thread_cp->exclusive_write(_writer);
-      }
-      if (sample->has_klass_checkpoint()) {
-        const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
-        klass_cp->exclusive_write(_writer);
-      }
+      write_blobs(sample, _writer);
     }
   }
 };
 
 class CheckpointStateReset {
  private:
+  const ObjectSampler* _sampler;
   const jlong _last_sweep;
  public:
-  CheckpointStateReset(jlong last_sweep) : _last_sweep(last_sweep) {}
+  CheckpointStateReset(const ObjectSampler* sampler, jlong last_sweep) : _sampler(sampler), _last_sweep(last_sweep) {}
   void sample_do(ObjectSample* sample) {
     assert(sample != NULL, "invariant");
     if (sample->is_alive_and_older_than(_last_sweep)) {
-      if (sample->has_thread_checkpoint()) {
-        const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
-        thread_cp->reset_write_state();
-      }
-      if (sample->has_klass_checkpoint()) {
-        const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
-        klass_cp->reset_write_state();
-      }
+      reset_blob_write_state(sample);
     }
   }
 };
 
+static void reset_write_state_for_blobs(const ObjectSampler* sampler, jlong last_sweep) {
+  CheckpointStateReset state_reset(sampler, last_sweep);
+  iterate_samples(state_reset, true, false);
+}
+
+static void write_sample_blobs(const ObjectSampler* sampler, jlong last_sweep, Thread* thread) {
+  JfrCheckpointWriter writer(thread, false);
+  CheckpointWrite checkpoint_write(sampler, writer, last_sweep);
+  iterate_samples(checkpoint_write, true, false);
+  reset_write_state_for_blobs(sampler, last_sweep);
+}
+
 class StackTraceWrite {
  private:
   JfrStackTraceRepository& _stack_trace_repo;
   JfrCheckpointWriter& _writer;
+  const jlong _last_sweep;
   int _count;
  public:
-  StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer) :
-    _stack_trace_repo(stack_trace_repo), _writer(writer), _count(0) {
-    JfrStacktrace_lock->lock_without_safepoint_check();
-  }
-  ~StackTraceWrite() {
-    assert(JfrStacktrace_lock->owned_by_self(), "invariant");
-    JfrStacktrace_lock->unlock();
-  }
-
+  StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer, jlong last_sweep) :
+    _stack_trace_repo(stack_trace_repo), _writer(writer), _last_sweep(last_sweep), _count(0) {}
   void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (!sample->is_dead()) {
-      if (sample->has_stack_trace()) {
-        JfrTraceId::use(sample->klass(), true);
-        _stack_trace_repo.write(_writer, sample->stack_trace_id(), sample->stack_trace_hash());
+    ObjectSampleCheckpoint::tag(sample);
+    if (stack_trace_precondition(sample) && sample->is_alive_and_older_than(_last_sweep)) {
+      assert(sample->stack_trace_id() == sample->stack_trace()->id(), "invariant");
+      if (ObjectSampleCheckpoint::tag(sample->stack_trace(), &_writer)) {
         ++_count;
       }
     }
   }
-
-  int count() const {
-    return _count;
-  }
-};
-
-class SampleMark {
- private:
-  ObjectSampleMarker& _marker;
-  jlong _last_sweep;
-  int _count;
- public:
-  SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker),
-                                                             _last_sweep(last_sweep),
-                                                             _count(0) {}
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (sample->is_alive_and_older_than(_last_sweep)) {
-      _marker.mark(sample->object());
-      ++_count;
-    }
-  }
-
   int count() const {
     return _count;
   }
 };
 
-void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) {
-  assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant");
-
-  if (!writer.has_data()) {
-    if (!class_unload) {
-      LeakProfiler::resume();
-    }
-    assert(LeakProfiler::is_running(), "invariant");
+static void write_and_tag_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepository& repo, jlong last_sweep, Thread* thread) {
+  assert(sampler != NULL, "invariant");
+  allocate_traceid_working_sets();
+  install_new_stack_traces(repo);
+  JfrCheckpointWriter writer(thread);
+  const JfrCheckpointContext ctx = writer.context();
+  writer.write_type(TYPE_STACKTRACE);
+  const jlong count_offset = writer.reserve(sizeof(u4));
+  StackTraceWrite sw(repo, writer, last_sweep);
+  do_samples(sampler->last(), NULL, sw);
+  if (sw.count() == 0) {
+    writer.set_context(ctx);
     return;
   }
-
-  assert(writer.has_data(), "invariant");
-  const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
-
-  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  const ObjectSample* const last_resolved = object_sampler->last_resolved();
-  CheckpointInstall install(h_cp);
-
-  if (class_unload) {
-    if (last != NULL) {
-      // all samples need the class unload information
-      do_samples(last, NULL, install);
-    }
-    assert(LeakProfiler::is_running(), "invariant");
-    return;
-  }
-
-  // only new samples since last resolved checkpoint
-  if (last != last_resolved) {
-    do_samples(last, last_resolved, install);
-    if (resume) {
-      const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
-    }
-  }
-  assert(LeakProfiler::is_suspended(), "invariant");
-  if (resume) {
-    LeakProfiler::resume();
-    assert(LeakProfiler::is_running(), "invariant");
-  }
+  writer.write_count((u4)sw.count(), count_offset);
 }
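write_and_tag_stack_traces() above uses a common JFR writer idiom: snapshot the writer context, emit the type header, reserve a placeholder for the record count, run the visitor, then either patch the real count into the placeholder or roll the writer back to the snapshot when nothing was written. A self-contained sketch of that shape over a plain byte buffer; Writer and its methods are invented for illustration:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Minimal writer with the reserve/patch/rollback shape used by
// JfrCheckpointWriter; the buffer and names are invented.
class Writer {
 private:
  std::vector<uint8_t> _buf;
 public:
  size_t context() const { return _buf.size(); }          // snapshot
  void set_context(size_t ctx) { _buf.resize(ctx); }      // rollback
  size_t reserve(size_t n) {                              // placeholder
    const size_t offset = _buf.size();
    _buf.insert(_buf.end(), n, 0);
    return offset;
  }
  void write_u4(uint32_t v) {
    const size_t offset = reserve(sizeof(v));
    std::memcpy(&_buf[offset], &v, sizeof(v));
  }
  void patch_u4(uint32_t v, size_t offset) {
    std::memcpy(&_buf[offset], &v, sizeof(v));
  }
  size_t size() const { return _buf.size(); }
};

int main() {
  Writer w;
  const size_t ctx = w.context();
  w.write_u4(0xDEADBEEF);                             // type header stand-in
  const size_t count_offset = w.reserve(sizeof(uint32_t));
  uint32_t count = 0;
  // ... a visitor would write entries here, bumping count ...
  if (count == 0) {
    w.set_context(ctx);                  // nothing written: roll back
  } else {
    w.patch_u4(count, count_offset);     // patch the real count in place
  }
  assert(w.size() == ctx);
  return 0;
}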
 
-void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) {
+void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
+  assert(sampler != NULL, "invariant");
   assert(edge_store != NULL, "invariant");
   assert(thread != NULL, "invariant");
-  static bool types_registered = false;
-  if (!types_registered) {
-    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, true, new RootSystemType());
-    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, true, new RootType());
-    types_registered = true;
-  }
-  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  {
-    JfrCheckpointWriter writer(false, false, thread);
-    CheckpointWrite checkpoint_write(writer, last_sweep);
-    do_samples(last, NULL, checkpoint_write);
-  }
-  CheckpointStateReset state_reset(last_sweep);
-  do_samples(last, NULL, state_reset);
+  register_serializers();
+  // the sample set is delimited by the time of the last sweep
+  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
+  write_and_tag_stack_traces(sampler, JfrStackTraceRepository::instance(), last_sweep, thread);
+  write_sample_blobs(sampler, last_sweep, thread);
+  // write reference chains
   if (!edge_store->is_empty()) {
-    // java object and chain representations
-    JfrCheckpointWriter writer(false, true, thread);
+    JfrCheckpointWriter writer(thread);
     ObjectSampleWriter osw(writer, edge_store);
-    edge_store->iterate_edges(osw);
+    edge_store->iterate(osw);
   }
 }
-
-WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) :
-  _stack_trace_repo(repo) {
-}
-
-bool WriteObjectSampleStacktrace::process() {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  if (!LeakProfiler::is_running()) {
-    return true;
-  }
-  // Suspend the LeakProfiler subsystem
-  // to ensure stable samples even
-  // after we return from the safepoint.
-  LeakProfiler::suspend();
-  assert(!LeakProfiler::is_running(), "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
-
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
-
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  const ObjectSample* const last_resolved = object_sampler->last_resolved();
-  if (last == last_resolved) {
-    assert(LeakProfiler::is_suspended(), "invariant");
-    return true;
-  }
-
-  JfrCheckpointWriter writer(false, true, Thread::current());
-  const JfrCheckpointContext ctx = writer.context();
-
-  writer.write_type(TYPE_STACKTRACE);
-  const jlong count_offset = writer.reserve(sizeof(u4));
-
-  int count = 0;
-  {
-    StackTraceWrite stack_trace_write(_stack_trace_repo, writer); // JfrStacktrace_lock
-    do_samples(last, last_resolved, stack_trace_write);
-    count = stack_trace_write.count();
-  }
-  if (count == 0) {
-    writer.set_context(ctx);
-    assert(LeakProfiler::is_suspended(), "invariant");
-    return true;
-  }
-  assert(count > 0, "invariant");
-  writer.write_count((u4)count, count_offset);
-  JfrStackTraceRepository::write_metadata(writer);
-
-  ObjectSampleCheckpoint::install(writer, false, false);
-  assert(LeakProfiler::is_suspended(), "invariant");
-  return true;
-}
-
-int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) {
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  if (last == NULL) {
-    return 0;
-  }
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  SampleMark mark(marker, last_sweep);
-  do_samples(last, NULL, mark);
-  return mark.count();
-}
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -26,26 +26,29 @@
 #define SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
 
 #include "memory/allocation.hpp"
-#include "utilities/exceptions.hpp"
 
 class EdgeStore;
-class JfrStackTraceRepository;
+class Klass;
+class JavaThread;
 class JfrCheckpointWriter;
+class JfrStackTrace;
+class JfrStackTraceRepository;
+class ObjectSample;
 class ObjectSampleMarker;
+class ObjectSampler;
+class Thread;
 
 class ObjectSampleCheckpoint : AllStatic {
  public:
-  static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume);
-  static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread);
-  static int mark(ObjectSampleMarker& marker, bool emit_all);
-};
-
-class WriteObjectSampleStacktrace : public StackObj {
- private:
-  JfrStackTraceRepository& _stack_trace_repo;
- public:
-  WriteObjectSampleStacktrace(JfrStackTraceRepository& repo);
-  bool process();
+  static void on_klass_unload(const Klass* k);
+  static void on_type_set_unload(JfrCheckpointWriter& writer);
+  static void on_thread_exit(JavaThread* jt);
+  static void resolve_sampled_objects();
+  static void rotate(const ObjectSampler* sampler, JfrStackTraceRepository& repo);
+  static void tag(const ObjectSample* sample);
+  static bool tag(const JfrStackTrace* trace, JfrCheckpointWriter* writer = NULL);
+  static int save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
+  static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
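The rewritten header exposes ObjectSampleCheckpoint as a set of static hooks (on_klass_unload, on_type_set_unload, on_thread_exit, ...) that VM subsystems invoke directly; each hook is expected to be a cheap no-op unless the leak profiler is running. A sketch of that static-hook facade style, with all names invented:

#include <iostream>

// Invented sketch of the static-hook facade style used by
// ObjectSampleCheckpoint: subsystems call cheap static hooks that
// bail out immediately unless the profiler is running.
class Profiler {
 public:
  static bool is_running() { return _running; }
  static void set_running(bool on) { _running = on; }
 private:
  static bool _running;
};
bool Profiler::_running = false;

class CheckpointHooks {
 public:
  // Called from the thread-exit path, cf. on_thread_exit(JavaThread*).
  static void on_thread_exit(unsigned long long thread_id) {
    if (!Profiler::is_running()) {
      return;  // fast path: profiler off, hook is a no-op
    }
    std::cout << "record exited thread " << thread_id << '\n';
  }
};

int main() {
  CheckpointHooks::on_thread_exit(7);  // ignored: profiler not running
  Profiler::set_running(true);
  CheckpointHooks::on_thread_exit(7);  // recorded
  return 0;
}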
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,7 @@
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
+#include "jfr/writers/jfrTypeWriterHost.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "utilities/growableArray.hpp"
@@ -159,6 +158,11 @@
     return stored->_field_modifiers == query->_field_modifiers;
   }
 
+  void unlink(FieldInfoEntry* entry) {
+    assert(entry != NULL, "invariant");
+    // nothing
+  }
+
  public:
   FieldTable() : _table(new FieldInfoTable(this)) {}
   ~FieldTable() {
@@ -196,7 +200,7 @@
 static FieldTable* field_infos = NULL;
 static RootDescriptionInfo* root_infos = NULL;
 
-int __write_sample_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* si) {
+int __write_sample_info__(JfrCheckpointWriter* writer, const void* si) {
   assert(writer != NULL, "invariant");
   assert(si != NULL, "invariant");
   const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si;
@@ -211,17 +215,17 @@
   return 1;
 }
 
-typedef JfrArtifactWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
-typedef JfrArtifactWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
+typedef JfrTypeWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
+typedef JfrTypeWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
 
 static void write_sample_infos(JfrCheckpointWriter& writer) {
   if (sample_infos != NULL) {
-    SampleWriter sw(&writer, NULL, false);
+    SampleWriter sw(&writer);
     sample_infos->iterate(sw);
   }
 }
 
-int __write_reference_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ri) {
+int __write_reference_info__(JfrCheckpointWriter* writer, const void* ri) {
   assert(writer != NULL, "invariant");
   assert(ri != NULL, "invariant");
   const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri;
@@ -233,17 +237,17 @@
   return 1;
 }
 
-typedef JfrArtifactWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
-typedef JfrArtifactWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
+typedef JfrTypeWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
+typedef JfrTypeWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
 
 static void write_reference_infos(JfrCheckpointWriter& writer) {
   if (ref_infos != NULL) {
-    ReferenceWriter rw(&writer, NULL, false);
+    ReferenceWriter rw(&writer);
     ref_infos->iterate(rw);
   }
 }
 
-int __write_array_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ai) {
+int __write_array_info__(JfrCheckpointWriter* writer, const void* ai) {
   assert(writer != NULL, "invariant");
   assert(ai != NULL, "invariant");
   const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai;
@@ -270,17 +274,17 @@
   return array_infos->store(osai);
 }
 
-typedef JfrArtifactWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
-typedef JfrArtifactWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
+typedef JfrTypeWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
+typedef JfrTypeWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
 
 static void write_array_infos(JfrCheckpointWriter& writer) {
   if (array_infos != NULL) {
-    ArrayWriter aw(&writer, NULL, false);
+    ArrayWriter aw(&writer);
     array_infos->iterate(aw);
   }
 }
 
-int __write_field_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* fi) {
+int __write_field_info__(JfrCheckpointWriter* writer, const void* fi) {
   assert(writer != NULL, "invariant");
   assert(fi != NULL, "invariant");
   const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi;
@@ -314,12 +318,12 @@
   return field_infos->store(osfi);
 }
 
-typedef JfrArtifactWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
-typedef JfrArtifactWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
+typedef JfrTypeWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
+typedef JfrTypeWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
 
 static void write_field_infos(JfrCheckpointWriter& writer) {
   if (field_infos != NULL) {
-    FieldWriter fw(&writer, NULL, false);
+    FieldWriter fw(&writer);
     field_infos->iterate(fw);
   }
 }
@@ -339,7 +343,7 @@
   return description.description();
 }
 
-int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* di) {
+int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di) {
   assert(writer != NULL, "invariant");
   assert(di != NULL, "invariant");
   const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di;
@@ -350,7 +354,7 @@
   return 1;
 }
 
-static traceid get_root_description_info_id(const Edge& edge, traceid id) {
+static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
   assert(edge.is_root(), "invariant");
   if (EdgeUtils::is_leak_edge(edge)) {
     return 0;
@@ -366,8 +370,8 @@
   return root_infos->store(oodi);
 }
 
-typedef JfrArtifactWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
-typedef JfrArtifactWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
+typedef JfrTypeWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
+typedef JfrTypeWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
 
 
 int _edge_reference_compare_(uintptr_t lhs, uintptr_t rhs) {
@@ -513,12 +517,12 @@
     RootResolutionSet rrs(root_infos);
     RootResolver::resolve(rrs);
     // write roots
-    RootDescriptionWriter rw(&writer, NULL, false);
+    RootDescriptionWriter rw(&writer);
     root_infos->iterate(rw);
   }
 }
 
-static void add_old_object_sample_info(const Edge* current, traceid id) {
+static void add_old_object_sample_info(const StoredEdge* current, traceid id) {
   assert(current != NULL, "invariant");
   if (sample_infos == NULL) {
     sample_infos = new SampleInfo();
@@ -528,11 +532,11 @@
   assert(oosi != NULL, "invariant");
   oosi->_id = id;
   oosi->_data._object = current->pointee();
-  oosi->_data._reference_id = current->is_root() ? (traceid)0 : id;
+  oosi->_data._reference_id = current->parent() == NULL ? (traceid)0 : id;
   sample_infos->store(oosi);
 }
 
-static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) {
+static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) {
   assert(current != NULL, "invariant");
   if (ref_infos == NULL) {
     ref_infos = new RefInfo();
@@ -544,37 +548,43 @@
 
   ri->_id = id;
   ri->_data._array_info_id =  !current->is_skip_edge() ? get_array_info_id(*current, id) : 0;
-  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ?
-                               get_field_info_id(*current) : (traceid)0;
+  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? get_field_info_id(*current) : (traceid)0;
   ri->_data._old_object_sample_id = parent_id;
   ri->_data._skip = current->skip_length();
   ref_infos->store(ri);
 }
 
-static traceid add_root_info(const Edge* root, traceid id) {
-  assert(root != NULL, "invariant");
-  assert(root->is_root(), "invariant");
-  return get_root_description_info_id(*root, id);
+static bool is_gc_root(const StoredEdge* current) {
+  assert(current != NULL, "invariant");
+  return current->parent() == NULL && current->gc_root_id() != 0;
 }
 
-void ObjectSampleWriter::write(const RoutableEdge* edge) {
+static traceid add_gc_root_info(const StoredEdge* root, traceid id) {
+  assert(root != NULL, "invariant");
+  assert(is_gc_root(root), "invariant");
+  return get_gc_root_description_info_id(*root, id);
+}
+
+void ObjectSampleWriter::write(const StoredEdge* edge) {
   assert(edge != NULL, "invariant");
   const traceid id = _store->get_id(edge);
   add_old_object_sample_info(edge, id);
-  const RoutableEdge* parent = edge->logical_parent();
+  const StoredEdge* const parent = edge->parent();
   if (parent != NULL) {
     add_reference_info(edge, id, _store->get_id(parent));
   } else {
-    assert(edge->is_root(), "invariant");
-    add_root_info(edge, id);
+    if (is_gc_root(edge)) {
+      assert(edge->gc_root_id() == id, "invariant");
+      add_gc_root_info(edge, id);
+    }
   }
 }
 
-ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) :
+ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
   _writer(writer),
   _store(store) {
   assert(store != NULL, "invariant");
-  assert(store->number_of_entries() > 0, "invariant");
+  assert(!store->is_empty(), "invariant");
   sample_infos = NULL;
   ref_infos = NULL;
   array_infos = NULL;
@@ -590,26 +600,7 @@
   write_root_descriptors(_writer);
 }
 
-void ObjectSampleWriter::write_chain(const RoutableEdge& edge) {
-  assert(EdgeUtils::is_leak_edge(edge), "invariant");
-  if (edge.processed()) {
-    return;
-  }
-  EdgeUtils::collapse_chain(edge);
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->processed()) {
-      return;
-    }
-    write(current);
-    current->set_processed();
-    current = current->logical_parent();
-  }
-}
-
-bool ObjectSampleWriter::operator()(const RoutableEdge& edge) {
-  if (EdgeUtils::is_leak_edge(edge)) {
-    write_chain(edge);
-  }
+bool ObjectSampleWriter::operator()(StoredEdge& e) {
+  write(&e);
   return true;
 }
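The move from JfrArtifactWriterImplHost to JfrTypeWriterImplHost in this file drops the unused JfrArtifactSet argument: each record type supplies a free write function that is bound into a writer host through a non-type template parameter, and one typedef per record type names the resulting writer. A compilable sketch of that binding, with invented stand-ins for the JFR types:

#include <iostream>

// Host parameterized by a record type and a free write function,
// mirroring the JfrTypeWriterImplHost typedef pattern (names invented).
template <typename T, int (*write_fn)(std::ostream*, T)>
class WriterHost {
 private:
  std::ostream* const _out;
  int _count;
 public:
  explicit WriterHost(std::ostream* out) : _out(out), _count(0) {}
  void operator()(T record) { _count += write_fn(_out, record); }
  int count() const { return _count; }
};

struct SampleInfo { unsigned long long id; };

static int write_sample_info(std::ostream* out, const SampleInfo* si) {
  *out << "sample " << si->id << '\n';
  return 1;  // one record written
}

// One typedef per record type, as for SampleWriter / ReferenceWriter / ...
typedef WriterHost<const SampleInfo*, write_sample_info> SampleWriter;

int main() {
  SampleWriter sw(&std::cout);
  SampleInfo si = { 42 };
  sw(&si);
  return sw.count() == 1 ? 0 : 1;
}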
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -30,21 +30,17 @@
 class Edge;
 class EdgeStore;
 class JfrCheckpointWriter;
-class RoutableEdge;
+class StoredEdge;
 
 class ObjectSampleWriter : public StackObj {
  private:
   JfrCheckpointWriter& _writer;
-  const EdgeStore* const _store;
-
-  void write(const RoutableEdge* edge);
-  void write_chain(const RoutableEdge& edge);
-
+  EdgeStore* const _store;
+  void write(const StoredEdge* edge);
  public:
-  ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store);
+  ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store);
   ~ObjectSampleWriter();
-
-  bool operator()(const RoutableEdge& edge);
+  bool operator()(StoredEdge& edge);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -41,9 +41,6 @@
 #include "runtime/vframe_hp.hpp"
 #include "services/management.hpp"
 #include "utilities/growableArray.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
 
 class ReferenceLocateClosure : public OopClosure {
  protected:
@@ -106,7 +103,6 @@
   bool do_management_roots();
   bool do_string_table_roots();
   bool do_aot_loader_roots();
-  JVMCI_ONLY(bool do_jvmci_roots();)
 
   bool do_roots();
 
@@ -132,7 +128,7 @@
 bool ReferenceToRootClosure::do_cldg_roots() {
   assert(!complete(), "invariant");
   ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, NULL);
-  CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_strong);
+  CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_none);
   ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
   return rlc.complete();
 }
@@ -193,15 +189,6 @@
   return rcl.complete();
 }
 
-#if INCLUDE_JVMCI
-bool ReferenceToRootClosure::do_jvmci_roots() {
-  assert(!complete(), "invariant");
-  ReferenceLocateClosure rcl(_callback, OldObjectRoot::_jvmci, OldObjectRoot::_type_undetermined, NULL);
-  JVMCI::oops_do(&rcl);
-  return rcl.complete();
-}
-#endif
-
 bool ReferenceToRootClosure::do_roots() {
   assert(!complete(), "invariant");
   assert(OldObjectRoot::_system_undetermined == _info._system, "invariant");
@@ -252,13 +239,6 @@
     return true;
   }
 
-#if INCLUDE_JVMCI
-  if (do_jvmci_roots()) {
-   _complete = true;
-    return true;
-  }
-#endif
-
   return false;
 }
 
@@ -436,9 +416,6 @@
 };
 
 void RootResolver::resolve(RootCallback& callback) {
-
-  // Need to clear cld claim bit before starting
-  ClassLoaderDataGraph::clear_claimed_marks();
   RootResolverMarkScope mark_scope;
 
   // thread local roots
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,8 +25,8 @@
 #ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
 #define SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
 
+#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "memory/allocation.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "oops/oopsHierarchy.hpp"
 
 struct RootCallbackInfo {
--- a/src/hotspot/share/jfr/leakprofiler/emitEventOperation.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#include "precompiled.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "jfr/jfrEvents.hpp"
-#include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
-#include "jfr/leakprofiler/chains/edge.hpp"
-#include "jfr/leakprofiler/chains/edgeQueue.hpp"
-#include "jfr/leakprofiler/chains/edgeStore.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
-#include "jfr/leakprofiler/sampling/objectSample.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
-#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
-#include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/leakprofiler/emitEventOperation.hpp"
-#include "jfr/leakprofiler/chains/bfsClosure.hpp"
-#include "jfr/leakprofiler/chains/dfsClosure.hpp"
-#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/support/jfrThreadId.hpp"
-#include "logging/log.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/markOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-/* The EdgeQueue is backed by directly managed virtual memory.
- * We will attempt to dimension an initial reservation
- * in proportion to the size of the heap (represented by heap_region).
- * Initial memory reservation: 5% of the heap OR at least 32 MB
- * Commit ratio: 1 : 10 (subject to allocation granularities)
- */
-static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
-  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
-  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
-  return memory_reservation_bytes;
-}
-
-static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
-  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
-  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
-  return memory_commit_block_size_bytes;
-}
-
-static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
-  log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
-  log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
-  log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
-  if (edge_queue.reserved_size() > 0) {
-    log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
-      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
-  }
-}
-
-void EmitEventOperation::doit() {
-  assert(LeakProfiler::is_running(), "invariant");
-  _object_sampler = LeakProfiler::object_sampler();
-  assert(_object_sampler != NULL, "invariant");
-
-  _vm_thread = VMThread::vm_thread();
-  assert(_vm_thread == Thread::current(), "invariant");
-  _vm_thread_local = _vm_thread->jfr_thread_local();
-  assert(_vm_thread_local != NULL, "invariant");
-  assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  // The VM_Operation::evaluate() which invoked doit()
-  // contains a top level ResourceMark
-
-  // save the original markWord for the potential leak objects
-  // to be restored on function exit
-  ObjectSampleMarker marker;
-  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
-    return;
-  }
-
-  EdgeStore edge_store;
-
-  GranularTimer::start(_cutoff_ticks, 1000000);
-  if (_cutoff_ticks <= 0) {
-    // no chains
-    write_events(&edge_store);
-    return;
-  }
-
-  assert(_cutoff_ticks > 0, "invariant");
-
-  // The bitset used for marking is dimensioned as a function of the heap size
-  const MemRegion heap_region = Universe::heap()->reserved_region();
-  BitSet mark_bits(heap_region);
-
-  // The edge queue is dimensioned as a fraction of the heap size
-  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
-  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
-
-  // The initialize() routines will attempt to reserve and allocate backing storage memory.
-  // Failure to accommodate will render root chain processing impossible.
-  // As a fallback on failure, just write out the existing samples, flat, without chains.
-  if (!(mark_bits.initialize() && edge_queue.initialize())) {
-    log_warning(jfr)("Unable to allocate memory for root chain processing");
-    write_events(&edge_store);
-    return;
-  }
-
-  // necessary condition for attempting a root set iteration
-  Universe::heap()->ensure_parsability(false);
-
-  RootSetClosure::add_to_queue(&edge_queue);
-  if (edge_queue.is_full()) {
-    // Pathological case where roots don't fit in queue
-    // Do a depth-first search, but mark roots first
-    // to avoid walking sideways over roots
-    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
-  } else {
-    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
-    bfs.process();
-  }
-  GranularTimer::stop();
-  write_events(&edge_store);
-  log_edge_queue_summary(edge_queue);
-}
-
-int EmitEventOperation::write_events(EdgeStore* edge_store) {
-  assert(_object_sampler != NULL, "invariant");
-  assert(edge_store != NULL, "invariant");
-  assert(_vm_thread != NULL, "invariant");
-  assert(_vm_thread_local != NULL, "invariant");
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-
-  // save thread id in preparation for thread local trace data manipulations
-  const traceid vmthread_id = _vm_thread_local->thread_id();
-  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
-  int count = 0;
-
-  const ObjectSample* current = _object_sampler->first();
-  while (current != NULL) {
-    ObjectSample* prev = current->prev();
-    if (current->is_alive_and_older_than(last_sweep)) {
-      write_event(current, edge_store);
-      ++count;
-    }
-    current = prev;
-  }
-
-  // restore thread local stack trace and thread id
-  _vm_thread_local->set_thread_id(vmthread_id);
-  _vm_thread_local->clear_cached_stack_trace();
-  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  if (count > 0) {
-    // serialize associated checkpoints
-    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
-  }
-  return count;
-}
-
-static int array_size(const oop object) {
-  assert(object != NULL, "invariant");
-  if (object->is_array()) {
-    return arrayOop(object)->length();
-  }
-  return min_jint;
-}
-
-void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
-  assert(sample != NULL, "invariant");
-  assert(!sample->is_dead(), "invariant");
-  assert(edge_store != NULL, "invariant");
-  assert(_vm_thread_local != NULL, "invariant");
-  const oop* object_addr = sample->object_addr();
-  assert(*object_addr != NULL, "invariant");
-
-  const Edge* edge = (const Edge*)(*object_addr)->mark();
-  traceid gc_root_id = 0;
-  if (edge == NULL) {
-    // In order to dump out a representation of the event
-    // even though it was not reachable or took too long to reach,
-    // we need to register a top-level edge for this object
-    Edge e(NULL, object_addr);
-    edge_store->add_chain(&e, 1);
-    edge = (const Edge*)(*object_addr)->mark();
-  } else {
-    gc_root_id = edge_store->get_root_id(edge);
-  }
-
-  assert(edge != NULL, "invariant");
-  assert(edge->pointee() == *object_addr, "invariant");
-  const traceid object_id = edge_store->get_id(edge);
-  assert(object_id != 0, "invariant");
-
-  EventOldObjectSample e(UNTIMED);
-  e.set_starttime(GranularTimer::start_time());
-  e.set_endtime(GranularTimer::end_time());
-  e.set_allocationTime(sample->allocation_time());
-  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
-  e.set_object(object_id);
-  e.set_arrayElements(array_size(*object_addr));
-  e.set_root(gc_root_id);
-
-  // Temporarily assigning both the stack trace id and thread id
-  // onto the thread local data structure of the VMThread (for the duration
-  // of the commit() call). This trick provides a means to override
-  // the event generation mechanism by injecting externally provided ids.
-  // Here, in particular, this allows us to emit an old object event
-  // supplying information from where the actual sampling occurred.
-  _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
-  assert(sample->has_thread(), "invariant");
-  _vm_thread_local->set_thread_id(sample->thread_id());
-  e.commit();
-}
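
Note on the removed doit() above: it dimensioned the edge queue by reserving virtual memory in proportion to the heap and committing it lazily in fixed-size blocks. A minimal standalone sketch of that arithmetic, assuming plain C++ in place of HotSpot's MAX2 and M macros:

    #include <algorithm>
    #include <cstddef>

    static const size_t M = 1024 * 1024;

    // Reservation: 5% of the heap, but never less than 32 MB.
    static size_t edge_queue_reservation(size_t heap_bytes) {
      return std::max(heap_bytes / 20, 32 * M);
    }

    // Commit granularity: 1/10 of the reservation, hence at least ~3.2 MB.
    static size_t edge_queue_commit_block(size_t reservation_bytes) {
      return reservation_bytes / 10;
    }

For an 8 GB heap this reserves ~410 MB of address space but commits it in ~41 MB blocks, so the queue stays cheap unless reference chains actually grow.
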
--- a/src/hotspot/share/jfr/leakprofiler/emitEventOperation.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
-#define SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
-
-#include "runtime/vmOperations.hpp"
-
-class BFSClosure;
-class EdgeStore;
-class EdgeQueue;
-class JfrThreadData;
-class ObjectSample;
-class ObjectSampler;
-
-class VMThread;
-
-// Safepoint operation for emitting object sample events
-class EmitEventOperation : public VM_Operation {
- private:
-  jlong _cutoff_ticks;
-  bool _emit_all;
-  VMThread* _vm_thread;
-  JfrThreadLocal* _vm_thread_local;
-  ObjectSampler* _object_sampler;
-
-  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
-  int write_events(EdgeStore* edge_store);
-
- public:
-  EmitEventOperation(jlong cutoff_ticks, bool emit_all) :
-    _cutoff_ticks(cutoff_ticks),
-    _emit_all(emit_all),
-    _vm_thread(NULL),
-    _vm_thread_local(NULL),
-    _object_sampler(NULL) {
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  virtual void doit();
-};
-
-#endif // SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
--- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,25 +23,31 @@
  */
 
 #include "precompiled.hpp"
-#include "jfr/leakprofiler/emitEventOperation.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/startOperation.hpp"
 #include "jfr/leakprofiler/stopOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "logging/log.hpp"
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
-#include "utilities/ostream.hpp"
+
+bool LeakProfiler::is_running() {
+  return ObjectSampler::is_created();
+}
 
-// Only to be updated during safepoint
-ObjectSampler* LeakProfiler::_object_sampler = NULL;
+bool LeakProfiler::start(int sample_count) {
+  if (is_running()) {
+    return true;
+  }
 
-static volatile jbyte suspended = 0;
-bool LeakProfiler::start(jint sample_count) {
+  // Allows user to disable leak profiler on command line by setting queue size to zero.
+  if (sample_count == 0) {
+    return false;
+  }
+
   if (UseZGC) {
     log_warning(jfr)("LeakProfiler is currently not supported in combination with ZGC");
     return false;
@@ -52,49 +58,56 @@
     return false;
   }
 
-  if (_object_sampler != NULL) {
-    // already started
-    return true;
+  assert(!is_running(), "invariant");
+  assert(sample_count > 0, "invariant");
+
+  // schedule the safepoint operation for installing the object sampler
+  StartOperation op(sample_count);
+  VMThread::execute(&op);
+
+  if (!is_running()) {
+    log_trace(jfr, system)("Object sampling could not be started because the sampler could not be allocated");
+    return false;
   }
-  // Allows user to disable leak profiler on command line by setting queue size to zero.
-  if (sample_count > 0) {
-    StartOperation op(sample_count);
-    VMThread::execute(&op);
-    return _object_sampler != NULL;
-  }
-  return false;
+  assert(is_running(), "invariant");
+  log_trace(jfr, system)("Object sampling started");
+  return true;
 }
 
 bool LeakProfiler::stop() {
-  if (_object_sampler == NULL) {
-    // already stopped/not started
-    return true;
+  if (!is_running()) {
+    return false;
   }
+
+  // schedule the safepoint operation for uninstalling and destroying the object sampler
   StopOperation op;
   VMThread::execute(&op);
-  return _object_sampler == NULL;
+
+  assert(!is_running(), "invariant");
+  log_trace(jfr, system)("Object sampling stopped");
+  return true;
 }
 
-void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) {
+void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) {
   if (!is_running()) {
     return;
   }
-  EmitEventOperation op(cutoff_ticks, emit_all);
-  VMThread::execute(&op);
+  // exclusive access to object sampler instance
+  ObjectSampler* const sampler = ObjectSampler::acquire();
+  assert(sampler != NULL, "invariant");
+  EventEmitter::emit(sampler, cutoff_ticks, emit_all);
+  ObjectSampler::release();
 }
 
 void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(),
     "Leak Profiler::oops_do(...) may only be called during safepoint");
-
-  if (_object_sampler != NULL) {
-    _object_sampler->oops_do(is_alive, f);
+  if (is_running()) {
+    ObjectSampler::oops_do(is_alive, f);
   }
 }
 
-void LeakProfiler::sample(HeapWord* object,
-                          size_t size,
-                          JavaThread* thread) {
+void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) {
   assert(is_running(), "invariant");
   assert(thread != NULL, "invariant");
   assert(thread->thread_state() == _thread_in_vm, "invariant");
@@ -104,39 +117,5 @@
     return;
   }
 
-  _object_sampler->add(object, size, thread);
-}
-
-ObjectSampler* LeakProfiler::object_sampler() {
-  assert(is_suspended() || SafepointSynchronize::is_at_safepoint(),
-    "Leak Profiler::object_sampler() may only be called during safepoint");
-  return _object_sampler;
-}
-
-void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-    "Leak Profiler::set_object_sampler() may only be called during safepoint");
-  _object_sampler = object_sampler;
-}
-
-bool LeakProfiler::is_running() {
-  return _object_sampler != NULL && !suspended;
+  ObjectSampler::sample(object, size, thread);
 }
-
-bool LeakProfiler::is_suspended() {
-  return _object_sampler != NULL && suspended;
-}
-
-void LeakProfiler::resume() {
-  assert(is_suspended(), "invariant");
-  OrderAccess::storestore();
-  Atomic::store((jbyte)0, &suspended);
-  assert(is_running(), "invariant");
-}
-
-void LeakProfiler::suspend() {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  assert(_object_sampler != NULL, "invariant");
-  assert(!is_suspended(), "invariant");
-  suspended = (jbyte)1; // safepoint visible
-}
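
Note: emit_events() now brackets every use of the sampler between ObjectSampler::acquire() and ObjectSampler::release() instead of scheduling a VM operation. A hypothetical RAII guard, not part of this changeset, makes the shape of that protocol explicit:

    // Illustration only; the changeset calls acquire()/release() directly.
    class SamplerAccess {
      ObjectSampler* const _sampler;
     public:
      SamplerAccess() : _sampler(ObjectSampler::acquire()) {}
      ~SamplerAccess() { ObjectSampler::release(); }
      ObjectSampler* sampler() const { return _sampler; }
    };

The direct-call style above is safe here because nothing between the two calls can return early; a guard like this would be the defensive alternative.
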
--- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,36 +28,16 @@
 #include "memory/allocation.hpp"
 
 class BoolObjectClosure;
-class ObjectSampler;
 class OopClosure;
 class JavaThread;
-class Thread;
 
 class LeakProfiler : public AllStatic {
-  friend class ClassUnloadTypeSet;
-  friend class EmitEventOperation;
-  friend class ObjectSampleCheckpoint;
-  friend class StartOperation;
-  friend class StopOperation;
-  friend class TypeSet;
-  friend class WriteObjectSampleStacktrace;
-
- private:
-  static ObjectSampler* _object_sampler;
-
-  static void set_object_sampler(ObjectSampler* object_sampler);
-  static ObjectSampler* object_sampler();
-
-  static void suspend();
-  static void resume();
-  static bool is_suspended();
-
  public:
-  static bool start(jint sample_count);
+  static bool start(int sample_count);
   static bool stop();
-  static void emit_events(jlong cutoff_ticks, bool emit_all);
   static bool is_running();
 
+  static void emit_events(int64_t cutoff_ticks, bool emit_all);
   static void sample(HeapWord* object, size_t size, JavaThread* thread);
 
   // Called by GC
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -26,12 +26,14 @@
 #define SHARE_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
 
 #include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
 #include "jfr/utilities/jfrAllocation.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
 #include "utilities/ticks.hpp"
+
 /*
  * Handle for diagnosing Java memory leaks.
  *
@@ -39,17 +41,22 @@
  * allocated, the thread and the stack trace.
  */
 class ObjectSample : public JfrCHeapObj {
+  friend class CheckpointInstall;
+  friend class ObjectResolver;
+  friend class ObjectSampleCheckpoint;
   friend class ObjectSampler;
   friend class SampleList;
  private:
   ObjectSample* _next;
   ObjectSample* _previous;
+  mutable const JfrStackTrace* _stack_trace;
   JfrCheckpointBlobHandle _thread_cp;
   JfrCheckpointBlobHandle _klass_cp;
   oop _object;
   Ticks _allocation_time;
   traceid _stack_trace_id;
   traceid _thread_id;
+  mutable traceid _klass_id;
   int _index;
   size_t _span;
   size_t _allocated;
@@ -72,20 +79,29 @@
 
   void reset() {
     set_stack_trace_id(0);
-    set_stack_trace_hash(0),
+    set_stack_trace_hash(0);
+    _klass_id = 0;
     release_references();
     _dead = false;
   }
 
+  ~ObjectSample() {
+    if (_stack_trace != NULL) {
+      delete _stack_trace;
+    }
+  }
+
  public:
   ObjectSample() : _next(NULL),
                    _previous(NULL),
+                   _stack_trace(NULL),
                    _thread_cp(),
                    _klass_cp(),
                    _object(NULL),
                    _allocation_time(),
                    _stack_trace_id(0),
                    _thread_id(0),
+                   _klass_id(0),
                    _index(0),
                    _span(0),
                    _allocated(0),
@@ -174,7 +190,7 @@
     return _heap_used_at_last_gc;
   }
 
-  bool has_stack_trace() const {
+  bool has_stack_trace_id() const {
     return stack_trace_id() != 0;
   }
 
@@ -194,6 +210,14 @@
     _stack_trace_hash = hash;
   }
 
+  const JfrStackTrace* stack_trace() const {
+    return _stack_trace;
+  }
+
+  void set_stack_trace(const JfrStackTrace* trace) const {
+    _stack_trace = trace;
+  }
+
   bool has_thread() const {
     return _thread_id != 0;
   }
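
Note: the new _stack_trace and _klass_id members are mutable because resolution happens late, during serialization, when callers typically hold only const pointers to samples; the new destructor also gives each sample ownership of its cached trace. A standalone sketch of that caching idiom, with hypothetical names:

    struct Trace { /* resolved stack frames */ };

    class Sample {
      mutable const Trace* _trace;   // may be attached through a const Sample*
     public:
      Sample() : _trace(nullptr) {}
      ~Sample() { delete _trace; }   // the sample owns its cached trace
      const Trace* trace() const { return _trace; }
      void set_trace(const Trace* t) const { _trace = t; }  // const setter, as in the diff
    };
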
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,6 +21,7 @@
  * questions.
  *
  */
+
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "jfr/leakprofiler/sampling/objectSample.hpp"
@@ -35,8 +36,18 @@
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 
+static ObjectSampler* _instance = NULL;
+
+static ObjectSampler& instance() {
+  assert(_instance != NULL, "invariant");
+  return *_instance;
+}
+
 ObjectSampler::ObjectSampler(size_t size) :
   _priority_queue(new SamplePriorityQueue(size)),
   _list(new SampleList(size)),
@@ -44,7 +55,6 @@
   _total_allocated(0),
   _threshold(0),
   _size(size),
-  _tryLock(0),
   _dead_samples(false) {}
 
 ObjectSampler::~ObjectSampler() {
@@ -54,32 +64,110 @@
   _list = NULL;
 }
 
-void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
+bool ObjectSampler::create(size_t size) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_instance == NULL, "invariant");
+  _instance = new ObjectSampler(size);
+  return _instance != NULL;
+}
+
+bool ObjectSampler::is_created() {
+  return _instance != NULL;
+}
+
+ObjectSampler* ObjectSampler::sampler() {
+  assert(is_created(), "invariant");
+  return _instance;
+}
+
+void ObjectSampler::destroy() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (_instance != NULL) {
+    ObjectSampler* const sampler = _instance;
+    _instance = NULL;
+    delete sampler;
+  }
+}
+
+static volatile int _lock = 0;
+
+ObjectSampler* ObjectSampler::acquire() {
+  assert(is_created(), "invariant");
+  while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
+  return _instance;
+}
+
+void ObjectSampler::release() {
+  assert(is_created(), "invariant");
+  OrderAccess::fence();
+  _lock = 0;
+}
+
+static traceid get_thread_id(JavaThread* thread) {
   assert(thread != NULL, "invariant");
-  const traceid thread_id = thread->threadObj() != NULL ? thread->jfr_thread_local()->thread_id() : 0;
+  if (thread->threadObj() == NULL) {
+    return 0;
+  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (!tl->has_thread_checkpoint()) {
+    JfrCheckpointManager::create_thread_checkpoint(thread);
+  }
+  assert(tl->has_thread_checkpoint(), "invariant");
+  return tl->thread_id();
+}
+
+// Populates the thread local stack frames, but does not add them
+// to the stacktrace repository (...yet, see stacktrace_id() below)
+//
+void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
+    JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0);
+  }
+}
+
+// We were successful in acquiring the try lock and have been selected for adding a sample.
+// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
+//
+traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
+  thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
+  return stacktrace_id;
+}
+
+void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  assert(is_created(), "invariant");
+
+  const traceid thread_id = get_thread_id(thread);
   if (thread_id == 0) {
     return;
   }
-  assert(thread_id != 0, "invariant");
-
-  if (!thread->jfr_thread_local()->has_thread_checkpoint()) {
-    JfrCheckpointManager::create_thread_checkpoint(thread);
-    assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
-  }
 
-  traceid stack_trace_id = 0;
-  unsigned int stack_trace_hash = 0;
-  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
-    stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash);
-    thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash);
-  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
+  fill_stacktrace(&stacktrace, thread);
 
-  JfrTryLock tryLock(&_tryLock);
+  // try enter critical section
+  JfrTryLock tryLock(&_lock);
   if (!tryLock.has_lock()) {
     log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
     return;
   }
 
+  instance().add(obj, allocated, thread_id, &stacktrace, thread);
+}
+
+void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread_id != 0, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+
   if (_dead_samples) {
     scavenge();
     assert(!_dead_samples, "invariant");
@@ -101,13 +189,13 @@
   }
 
   assert(sample != NULL, "invariant");
-  assert(thread_id != 0, "invariant");
   sample->set_thread_id(thread_id);
   sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
 
-  if (stack_trace_id != 0) {
-    sample->set_stack_trace_id(stack_trace_id);
-    sample->set_stack_trace_hash(stack_trace_hash);
+  const unsigned int stacktrace_hash = stacktrace->hash();
+  if (stacktrace_hash != 0) {
+    sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
+    sample->set_stack_trace_hash(stacktrace_hash);
   }
 
   sample->set_span(allocated);
@@ -118,38 +206,16 @@
   _priority_queue->push(sample);
 }
 
-const ObjectSample* ObjectSampler::last() const {
-  return _list->last();
-}
-
-const ObjectSample* ObjectSampler::first() const {
-  return _list->first();
-}
-
-const ObjectSample* ObjectSampler::last_resolved() const {
-  return _list->last_resolved();
-}
-
-void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
-  _list->set_last_resolved(sample);
-}
-
-void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+void ObjectSampler::scavenge() {
   ObjectSample* current = _list->last();
   while (current != NULL) {
     ObjectSample* next = current->next();
-    if (!current->is_dead()) {
-      if (is_alive->do_object_b(current->object())) {
-        // The weakly referenced object is alive, update pointer
-        f->do_oop(const_cast<oop*>(current->object_addr()));
-      } else {
-        current->set_dead();
-        _dead_samples = true;
-      }
+    if (current->is_dead()) {
+      remove_dead(current);
     }
     current = next;
   }
-  _last_sweep = JfrTicks::now();
+  _dead_samples = false;
 }
 
 void ObjectSampler::remove_dead(ObjectSample* sample) {
@@ -166,16 +232,41 @@
   _list->release(sample);
 }
 
-void ObjectSampler::scavenge() {
-  ObjectSample* current = _list->last();
+void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(is_created(), "invariant");
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  ObjectSampler& sampler = instance();
+  ObjectSample* current = sampler._list->last();
   while (current != NULL) {
     ObjectSample* next = current->next();
-    if (current->is_dead()) {
-      remove_dead(current);
+    if (!current->is_dead()) {
+      if (is_alive->do_object_b(current->object())) {
+        // The weakly referenced object is alive, update pointer
+        f->do_oop(const_cast<oop*>(current->object_addr()));
+      } else {
+        current->set_dead();
+        sampler._dead_samples = true;
+      }
     }
     current = next;
   }
-  _dead_samples = false;
+  sampler._last_sweep = JfrTicks::now();
+}
+
+ObjectSample* ObjectSampler::last() const {
+  return _list->last();
+}
+
+const ObjectSample* ObjectSampler::first() const {
+  return _list->first();
+}
+
+const ObjectSample* ObjectSampler::last_resolved() const {
+  return _list->last_resolved();
+}
+
+void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
+  _list->set_last_resolved(sample);
 }
 
 int ObjectSampler::item_count() const {
@@ -189,7 +280,7 @@
 ObjectSample* ObjectSampler::item_at(int index) {
   return const_cast<ObjectSample*>(
     const_cast<const ObjectSampler*>(this)->item_at(index)
-                                   );
+                                  );
 }
 
 const JfrTicks& ObjectSampler::last_sweep() const {
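
Note on the locking in this file: the sampler instance is now guarded by a single static flag. acquire() spins on a compare-and-swap, release() fences before clearing, and ObjectSampler::sample() takes the same flag through JfrTryLock, so a contended Java thread skips its sample rather than blocking. The same protocol expressed with std::atomic, as a portable approximation of HotSpot's Atomic/OrderAccess primitives:

    #include <atomic>

    static std::atomic<int> lock_flag(0);

    // Blocking acquire (emit path): spin until the flag is claimed.
    void acquire() {
      int expected = 0;
      while (!lock_flag.compare_exchange_weak(expected, 1,
                                              std::memory_order_acquire)) {
        expected = 0;  // compare_exchange rewrites expected on failure
      }
    }

    // Release: order prior writes before the flag becomes visible as free.
    void release() {
      lock_flag.store(0, std::memory_order_release);
    }

    // Non-blocking attempt (sampling fast path): give up under contention.
    bool try_acquire() {
      int expected = 0;
      return lock_flag.compare_exchange_strong(expected, 1,
                                               std::memory_order_acquire);
    }
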
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,23 +28,23 @@
 #include "memory/allocation.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 
+typedef u8 traceid;
+
 class BoolObjectClosure;
+class JavaThread;
+class JfrStackTrace;
 class OopClosure;
 class ObjectSample;
-class ObjectSampler;
 class SampleList;
 class SamplePriorityQueue;
-class Thread;
 
 // Class responsible for holding samples and
 // making sure the samples are evenly distributed as
 // new entries are added and removed.
 class ObjectSampler : public CHeapObj<mtTracing> {
   friend class LeakProfiler;
-  friend class ObjectSampleCheckpoint;
   friend class StartOperation;
   friend class StopOperation;
-  friend class EmitEventOperation;
  private:
   SamplePriorityQueue* _priority_queue;
   SampleList* _list;
@@ -52,25 +52,41 @@
   size_t _total_allocated;
   size_t _threshold;
   size_t _size;
-  volatile int _tryLock;
   bool _dead_samples;
 
+  // Lifecycle
   explicit ObjectSampler(size_t size);
   ~ObjectSampler();
+  static bool create(size_t size);
+  static bool is_created();
+  static void destroy();
 
-  void add(HeapWord* object, size_t size, JavaThread* thread);
+  // Stacktrace
+  static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
+  traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
+
+  // Sampling
+  static void sample(HeapWord* object, size_t size, JavaThread* thread);
+  void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
+  void scavenge();
   void remove_dead(ObjectSample* sample);
-  void scavenge();
 
   // Called by GC
-  void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+  static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 
- public:
   const ObjectSample* item_at(int index) const;
   ObjectSample* item_at(int index);
   int item_count() const;
+
+ public:
+  static ObjectSampler* sampler();
+
+  // For operations that require exclusive access (non-safepoint)
+  static ObjectSampler* acquire();
+  static void release();
+
   const ObjectSample* first() const;
-  const ObjectSample* last() const;
+  ObjectSample* last() const;
   const ObjectSample* last_resolved() const;
   void set_last_resolved(const ObjectSample* sample);
   const JfrTicks& last_sweep() const;
--- a/src/hotspot/share/jfr/leakprofiler/startOperation.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/startOperation.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,35 +25,18 @@
 #ifndef SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
 #define SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
 
-#include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "logging/log.hpp"
-#include "runtime/vmOperations.hpp"
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-// Safepoint operation for starting leak profiler object sampler
-class StartOperation : public VM_Operation {
+// Safepoint operation for creating and starting the leak profiler object sampler
+class StartOperation : public OldObjectVMOperation {
  private:
-  jlong _sample_count;
+  int _sample_count;
  public:
-  StartOperation(jlong sample_count) :
-    _sample_count(sample_count) {
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
+  StartOperation(int sample_count) : _sample_count(sample_count) {}
 
   virtual void doit() {
-    assert(!LeakProfiler::is_running(), "invariant");
-    jint queue_size = JfrOptionSet::old_object_queue_size();
-    LeakProfiler::set_object_sampler(new ObjectSampler(queue_size));
-    log_trace(jfr, system)( "Object sampling started");
+    ObjectSampler::create(_sample_count);
   }
 };
 
--- a/src/hotspot/share/jfr/leakprofiler/stopOperation.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/stopOperation.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,31 +25,14 @@
 #ifndef SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
 #define SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
 
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "logging/log.hpp"
-#include "runtime/vmOperations.hpp"
-
-// Safepoint operation for stopping leak profiler object sampler
-class StopOperation : public VM_Operation {
- public:
-  StopOperation() {}
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
+// Safepoint operation for stopping and destroying the leak profiler object sampler
+class StopOperation : public OldObjectVMOperation {
+ public:
   virtual void doit() {
-    assert(LeakProfiler::is_running(), "invariant");
-    ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-    delete object_sampler;
-    LeakProfiler::set_object_sampler(NULL);
-    log_trace(jfr, system)( "Object sampling stopped");
+    ObjectSampler::destroy();
   }
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/vmOperation.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+
+#include "runtime/vmOperations.hpp"
+
+class OldObjectVMOperation : public VM_Operation {
+ public:
+  Mode evaluation_mode() const {
+    return _safepoint;
+  }
+
+  VMOp_Type type() const {
+    return VMOp_JFROldObject;
+  }
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
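
Note: with evaluation_mode() and type() hoisted into this shared base class, StartOperation and StopOperation above reduce to a doit() body each. The call sites, shown earlier in leakProfiler.cpp, keep the standard VM-operation pattern:

    StartOperation start_op(sample_count);
    VMThread::execute(&start_op);  // doit() runs in the VMThread at a safepoint

    StopOperation stop_op;
    VMThread::execute(&stop_op);
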
--- a/src/hotspot/share/jfr/metadata/metadata.xml	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/metadata/metadata.xml	Sat Aug 24 14:30:27 2019 +0200
@@ -1019,6 +1019,27 @@
     <Field type="ulong" contentType="bytes" name="size" label="Size Written" />
   </Event>
 
+  <Event name="ShenandoahHeapRegionStateChange" category="Java Virtual Machine, GC, Detailed" label="Shenandoah Heap Region State Change" description="Information about a Shenandoah heap region state change"
+    startTime="false">
+    <Field type="uint" name="index" label="Index" />
+    <Field type="ShenandoahHeapRegionState" name="from" label="From" />
+    <Field type="ShenandoahHeapRegionState" name="to" label="To" />
+    <Field type="ulong" contentType="address" name="start" label="Start" />
+    <Field type="ulong" contentType="bytes" name="used" label="Used" />
+  </Event>
+
+  <Event name="ShenandoahHeapRegionInformation" category="Java Virtual Machine, GC, Detailed" label="Shenandoah Heap Region Information" description="Information about a specific heap region in the Shenandoah GC"
+    period="everyChunk">
+    <Field type="uint" name="index" label="Index" />
+    <Field type="ShenandoahHeapRegionState" name="state" label="State" />
+    <Field type="ulong" contentType="address" name="start" label="Start" />
+    <Field type="ulong" contentType="bytes" name="used" label="Used" />
+  </Event>
+
+  <Type name="ShenandoahHeapRegionState" label="Shenandoah Heap Region State">
+    <Field type="string" name="state" label="State" />
+  </Type>
+
   <Type name="ZStatisticsCounterType" label="Z Statistics Counter">
     <Field type="string" name="counter" label="Counter" />
   </Type>
@@ -1198,6 +1219,10 @@
     <Field type="int" name="bytecodeIndex" label="Bytecode Index" />
     <Field type="FrameType" name="type" label="Frame Type" />
   </Type>
+
+  <Type name="ChunkHeader" label="Chunk Header">
+    <Field type="byte" array="true" name="payload" label="Payload" />
+  </Type>
  
   <Relation name="JavaMonitorAddress"/>
   <Relation name="SafepointId"/>
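
For orientation: the JFR metadata generator turns each <Event> element into an EventXxx C++ class with one setter per <Field> plus should_commit()/commit(). A sketch of how the new state-change event would plausibly be emitted from GC code; the event and field names come from the XML above, while the surrounding variables are hypothetical:

    EventShenandoahHeapRegionStateChange event;
    if (event.should_commit()) {
      event.set_index(region_index);           // uint
      event.set_from(old_state);               // ShenandoahHeapRegionState ids
      event.set_to(new_state);
      event.set_start((uintptr_t)region_start_address);
      event.set_used(region_used_bytes);
      event.commit();
    }
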
--- a/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -198,7 +198,7 @@
   }
 
   if (write_type) {
-    JfrCheckpointWriter writer(false, true, Thread::current());
+    JfrCheckpointWriter writer;
     write_interface_types(writer);
   }
   static bool is_serializer_registered = false;
--- a/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -67,9 +67,14 @@
  private:
   CPUInformationInterface* _cpu_info_interface;
   CPUPerformanceInterface* _cpu_perf_interface;
-  SystemProcessInterface*  _system_process_interface;
+  SystemProcessInterface* _system_process_interface;
   NetworkPerformanceInterface* _network_performance_interface;
 
+  CPUInformationInterface* cpu_info_interface();
+  CPUPerformanceInterface* cpu_perf_interface();
+  SystemProcessInterface* system_process_interface();
+  NetworkPerformanceInterface* network_performance_interface();
+
   JfrOSInterfaceImpl();
   bool initialize();
   ~JfrOSInterfaceImpl();
@@ -90,28 +95,57 @@
    // system processes information
   int system_processes(SystemProcess** system_processes, int* no_of_sys_processes);
 
-  int network_utilization(NetworkInterface** network_interfaces) const;
+  int network_utilization(NetworkInterface** network_interfaces);
 };
 
 JfrOSInterface::JfrOSInterfaceImpl::JfrOSInterfaceImpl() : _cpu_info_interface(NULL),
                                                            _cpu_perf_interface(NULL),
-                                                           _system_process_interface(NULL) {}
+                                                           _system_process_interface(NULL),
+                                                           _network_performance_interface(NULL) {}
+
+template <typename T>
+static T* create_interface() {
+  ResourceMark rm;
+  T* iface = new T();
+  if (iface != NULL) {
+    if (!iface->initialize()) {
+      delete iface;
+      iface = NULL;
+    }
+  }
+  return iface;
+}
+
+CPUInformationInterface* JfrOSInterface::JfrOSInterfaceImpl::cpu_info_interface() {
+  if (_cpu_info_interface == NULL) {
+    _cpu_info_interface = create_interface<CPUInformationInterface>();
+  }
+  return _cpu_info_interface;
+}
+
+CPUPerformanceInterface* JfrOSInterface::JfrOSInterfaceImpl::cpu_perf_interface() {
+  if (_cpu_perf_interface == NULL) {
+    _cpu_perf_interface = create_interface<CPUPerformanceInterface>();
+  }
+  return _cpu_perf_interface;
+}
+
+SystemProcessInterface* JfrOSInterface::JfrOSInterfaceImpl::system_process_interface() {
+  if (_system_process_interface == NULL) {
+    _system_process_interface = create_interface<SystemProcessInterface>();
+  }
+  return _system_process_interface;
+}
+
+NetworkPerformanceInterface* JfrOSInterface::JfrOSInterfaceImpl::network_performance_interface() {
+  if (_network_performance_interface == NULL) {
+    _network_performance_interface = create_interface<NetworkPerformanceInterface>();
+  }
+  return _network_performance_interface;
+}
 
 bool JfrOSInterface::JfrOSInterfaceImpl::initialize() {
-  _cpu_info_interface = new CPUInformationInterface();
-  if (!(_cpu_info_interface != NULL && _cpu_info_interface->initialize())) {
-    return false;
-  }
-  _cpu_perf_interface = new CPUPerformanceInterface();
-  if (!(_cpu_perf_interface != NULL && _cpu_perf_interface->initialize())) {
-    return false;
-  }
-  _system_process_interface = new SystemProcessInterface();
-  if (!(_system_process_interface != NULL && _system_process_interface->initialize())) {
-    return false;
-  }
-  _network_performance_interface = new NetworkPerformanceInterface();
-  return _network_performance_interface != NULL && _network_performance_interface->initialize();
+  return true;
 }
 
 JfrOSInterface::JfrOSInterfaceImpl::~JfrOSInterfaceImpl(void) {
@@ -133,36 +167,43 @@
   }
 }
 
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_information(CPUInformation& cpu_info) {
+  CPUInformationInterface* const iface = cpu_info_interface();
+  return iface == NULL ? OS_ERR : iface->cpu_information(cpu_info);
+}
+
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_load(int which_logical_cpu, double* cpu_load) {
-  return _cpu_perf_interface->cpu_load(which_logical_cpu, cpu_load);
+  CPUPerformanceInterface* const iface = cpu_perf_interface();
+  return iface == NULL ? OS_ERR : iface->cpu_load(which_logical_cpu, cpu_load);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::context_switch_rate(double* rate) {
-  return _cpu_perf_interface->context_switch_rate(rate);
+  CPUPerformanceInterface* const iface = cpu_perf_interface();
+  return iface == NULL ? OS_ERR : iface->context_switch_rate(rate);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_load_total_process(double* cpu_load) {
-  return _cpu_perf_interface->cpu_load_total_process(cpu_load);
+  CPUPerformanceInterface* const iface = cpu_perf_interface();
+  return iface == NULL ? OS_ERR : iface->cpu_load_total_process(cpu_load);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_loads_process(double* pjvmUserLoad,
                                                           double* pjvmKernelLoad,
                                                           double* psystemTotal) {
-  return _cpu_perf_interface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
-}
-
-int JfrOSInterface::JfrOSInterfaceImpl::cpu_information(CPUInformation& cpu_info) {
-  return _cpu_info_interface->cpu_information(cpu_info);
+  CPUPerformanceInterface* const iface = cpu_perf_interface();
+  return iface == NULL ? OS_ERR : iface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) {
   assert(system_processes != NULL, "system_processes pointer is NULL!");
   assert(no_of_sys_processes != NULL, "no_of_sys_processes pointer is NULL!");
-  return _system_process_interface->system_processes(system_processes, no_of_sys_processes);
+  SystemProcessInterface* const iface = system_process_interface();
+  return iface == NULL ? OS_ERR : iface->system_processes(system_processes, no_of_sys_processes);
 }
 
-int JfrOSInterface::JfrOSInterfaceImpl::network_utilization(NetworkInterface** network_interfaces) const {
-  return _network_performance_interface->network_utilization(network_interfaces);
+int JfrOSInterface::JfrOSInterfaceImpl::network_utilization(NetworkInterface** network_interfaces) {
+  NetworkPerformanceInterface* const iface = network_performance_interface();
+  return iface == NULL ? OS_ERR : iface->network_utilization(network_interfaces);
 }
 
 // assigned char* is RESOURCE_HEAP_ALLOCATED
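
Net effect of the jfrOSInterface.cpp changes: the four interfaces are no longer all constructed eagerly in initialize() (which now just returns true); each is created on first use via create_interface<T>(), and a failed initialize() degrades only that one accessor to OS_ERR instead of failing recorder startup. The caching pattern, condensed into a single hypothetical helper:

    template <typename T>
    static T* lazy_interface(T*& slot) {
      if (slot == NULL) {
        T* iface = new T();
        if (iface != NULL && !iface->initialize()) {
          delete iface;          // constructed, but failed to initialize
          iface = NULL;
        }
        slot = iface;            // a NULL result is simply retried next call
      }
      return slot;
    }
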
--- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -65,7 +65,9 @@
 #include "services/threadService.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
-
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahJfrSupport.hpp"
+#endif
 /**
  *  JfrPeriodic class
  *  Implementation of declarations in
@@ -629,3 +631,14 @@
   event.set_flushingEnabled(UseCodeCacheFlushing);
   event.commit();
 }
+
+
+TRACE_REQUEST_FUNC(ShenandoahHeapRegionInformation) {
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC) {
+    VM_ShenandoahSendHeapRegionInfoEvents op;
+    VMThread::execute(&op);
+  }
+#endif
+}
+
--- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -462,8 +462,8 @@
       last_native_ms = last_java_ms;
     }
     _sample.signal();
-    jlong java_interval = _interval_java == 0 ? max_jlong : MAX2<jlong>(_interval_java, 10);
-    jlong native_interval = _interval_native == 0 ? max_jlong : MAX2<jlong>(_interval_native, 10);
+    jlong java_interval = _interval_java == 0 ? max_jlong : MAX2<jlong>(_interval_java, 1);
+    jlong native_interval = _interval_native == 0 ? max_jlong : MAX2<jlong>(_interval_native, 1);
 
     jlong now_ms = get_monotonic_ms();
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -91,22 +91,18 @@
 static const size_t checkpoint_buffer_cache_count = 2;
 static const size_t checkpoint_buffer_size = 512 * K;
 
-static JfrCheckpointMspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrCheckpointManager* system) {
-  JfrCheckpointMspace* mspace = new JfrCheckpointMspace(buffer_size, limit, cache_count, system);
-  if (mspace != NULL) {
-    mspace->initialize();
-  }
-  return mspace;
+static JfrCheckpointMspace* allocate_mspace(size_t size, size_t limit, size_t cache_count, JfrCheckpointManager* mgr) {
+  return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(size, limit, cache_count, mgr);
 }
 
 bool JfrCheckpointManager::initialize() {
   assert(_free_list_mspace == NULL, "invariant");
-  _free_list_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
+  _free_list_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
   if (_free_list_mspace == NULL) {
     return false;
   }
   assert(_epoch_transition_mspace == NULL, "invariant");
-  _epoch_transition_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
+  _epoch_transition_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
   if (_epoch_transition_mspace == NULL) {
     return false;
   }
@@ -118,22 +114,6 @@
   return JfrTypeManager::initialize();
 }
 
-bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
-  return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
-}
-
-void JfrCheckpointManager::synchronize_epoch() {
-  assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant");
-  OrderAccess::storestore();
-  _checkpoint_epoch_state = JfrTraceIdEpoch::epoch();
-}
-
-void JfrCheckpointManager::shift_epoch() {
-  debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
-  JfrTraceIdEpoch::shift_epoch();
-  assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
-}
-
 void JfrCheckpointManager::register_service_thread(const Thread* thread) {
   _service_thread = thread;
 }
@@ -155,7 +135,6 @@
 }
 
 #ifdef ASSERT
-
 bool JfrCheckpointManager::is_locked() const {
   return _lock->owned_by_self();
 }
@@ -171,7 +150,6 @@
   assert(buffer->lease(), "invariant");
   assert(buffer->acquired_by_self(), "invariant");
 }
-
 #endif // ASSERT
 
 static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread) {
@@ -189,6 +167,10 @@
   return buffer;
 }
 
+bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
+  return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
+}
+
 static const size_t lease_retry = 10;
 
 BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) {
@@ -252,41 +234,37 @@
   return read_data<jlong>(data + duration_offset);
 }
 
-static bool is_flushpoint(const u1* data) {
-  return read_data<juint>(data + flushpoint_offset) == (juint)1;
-}
-
 static juint number_of_types(const u1* data) {
   return read_data<juint>(data + types_offset);
 }
 
-static void write_checkpoint_header(JfrChunkWriter& cw, intptr_t offset_prev_cp_event, const u1* data) {
+static void write_checkpoint_header(JfrChunkWriter& cw, int64_t offset_prev_cp_event, const u1* data) {
   cw.reserve(sizeof(u4));
-  cw.write((u8)EVENT_CHECKPOINT);
-  cw.write(starttime(data));
-  cw.write(duration(data));
-  cw.write((jlong)offset_prev_cp_event);
-  cw.write(is_flushpoint(data));
-  cw.write(number_of_types(data));
+  cw.write<u8>(EVENT_CHECKPOINT);
+  cw.write<u8>(starttime(data));
+  cw.write<u8>(duration(data));
+  cw.write<u8>(offset_prev_cp_event);
+  cw.write<bool>(false); // not a flushpoint
+  cw.write<juint>(number_of_types(data));
 }
 
 static void write_checkpoint_content(JfrChunkWriter& cw, const u1* data, size_t size) {
   assert(data != NULL, "invariant");
-  cw.write_unbuffered(data + payload_offset, size);
+  cw.write_unbuffered(data + payload_offset, size - sizeof(JfrCheckpointEntry));
 }
 
 static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) {
   assert(data != NULL, "invariant");
+  const int64_t event_begin = cw.current_offset();
   const int64_t last_checkpoint_event = cw.last_checkpoint_offset();
-  const int64_t event_begin = cw.current_offset();
-  const int64_t offset_to_last_checkpoint_event = 0 == last_checkpoint_event ? 0 : last_checkpoint_event - event_begin;
-  const int64_t total_checkpoint_size = total_size(data);
-  write_checkpoint_header(cw, offset_to_last_checkpoint_event, data);
-  write_checkpoint_content(cw, data, total_checkpoint_size - sizeof(JfrCheckpointEntry));
-  const int64_t checkpoint_event_size = cw.current_offset() - event_begin;
-  cw.write_padded_at_offset<u4>(checkpoint_event_size, event_begin);
+  const int64_t delta = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin;
+  const int64_t checkpoint_size = total_size(data);
+  write_checkpoint_header(cw, delta, data);
+  write_checkpoint_content(cw, data, checkpoint_size);
+  const int64_t event_size = cw.current_offset() - event_begin;
+  cw.write_padded_at_offset<u4>(event_size, event_begin);
   cw.set_last_checkpoint_offset(event_begin);
-  return (size_t)total_checkpoint_size;
+  return (size_t)checkpoint_size;
 }
 
 static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) {
@@ -294,14 +272,14 @@
   assert(data != NULL, "invariant");
   assert(size > 0, "invariant");
   const u1* const limit = data + size;
-  const u1* next_entry = data;
+  const u1* next = data;
   size_t processed = 0;
-  while (next_entry < limit) {
-    const size_t checkpoint_size = write_checkpoint_event(cw, next_entry);
+  while (next < limit) {
+    const size_t checkpoint_size = write_checkpoint_event(cw, next);
     processed += checkpoint_size;
-    next_entry += checkpoint_size;
+    next += checkpoint_size;
   }
-  assert(next_entry == limit, "invariant");
+  assert(next == limit, "invariant");
   return processed;
 }
 
@@ -321,57 +299,30 @@
 };
 
 typedef CheckpointWriteOp<JfrCheckpointMspace::Type> WriteOperation;
-typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
 typedef ReleaseOp<JfrCheckpointMspace> CheckpointReleaseOperation;
-typedef CompositeOperation<MutexedWriteOperation, CheckpointReleaseOperation> CheckpointWriteOperation;
 
-static size_t write_mspace_exclusive(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwriter) {
-  Thread* const thread = Thread::current();
+template <template <typename> class WriterHost, template <typename, typename> class CompositeOperation>
+static size_t write_mspace(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwriter) {
+  assert(mspace != NULL, "invariant");
   WriteOperation wo(chunkwriter);
-  MutexedWriteOperation mwo(wo);
-  CheckpointReleaseOperation cro(mspace, thread, false);
-  CheckpointWriteOperation cpwo(&mwo, &cro);
+  WriterHost<WriteOperation> wh(wo);
+  CheckpointReleaseOperation cro(mspace, Thread::current(), false);
+  CompositeOperation<WriterHost<WriteOperation>, CheckpointReleaseOperation> co(&wh, &cro);
   assert(mspace->is_full_empty(), "invariant");
-  process_free_list(cpwo, mspace);
+  process_free_list(co, mspace);
   return wo.processed();
 }
 
-size_t JfrCheckpointManager::write() {
-  const size_t processed = write_mspace_exclusive(_free_list_mspace, _chunkwriter);
-  synchronize_epoch();
-  return processed;
-}
-
-typedef StopOnEmptyIterator<JfrDoublyLinkedList<JfrBuffer> > EmptyIterator;
-
-template <typename Processor>
-static void process_transition_mspace(Processor& processor, JfrCheckpointMspace* mspace) {
-  assert(mspace->is_full_empty(), "invariant");
-  process_free_list_iterator_control<Processor, JfrCheckpointMspace, EmptyIterator>(processor, mspace, forward);
+void JfrCheckpointManager::synchronize_epoch() {
+  assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant");
+  OrderAccess::storestore();
+  _checkpoint_epoch_state = JfrTraceIdEpoch::epoch();
 }
 
-size_t JfrCheckpointManager::flush() {
-  WriteOperation wo(_chunkwriter);
-  MutexedWriteOperation mwo(wo);
-  process_transition_mspace(mwo, _epoch_transition_mspace);
-  assert(_free_list_mspace->is_full_empty(), "invariant");
-  process_free_list(mwo, _free_list_mspace);
-  return wo.processed();
-}
-
-size_t JfrCheckpointManager::write_constants() {
-  write_types();
-  return flush();
-}
-
-size_t JfrCheckpointManager::write_epoch_transition_mspace() {
-  Thread* const thread = Thread::current();
-  WriteOperation wo(_chunkwriter);
-  MutexedWriteOperation mwo(wo);
-  CheckpointReleaseOperation cro(_epoch_transition_mspace, thread, false);
-  CheckpointWriteOperation cpwo(&mwo, &cro);
-  process_transition_mspace(cpwo, _epoch_transition_mspace);
-  return wo.processed();
+void JfrCheckpointManager::shift_epoch() {
+  debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
+  JfrTraceIdEpoch::shift_epoch();
+  assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
 }
 
 typedef DiscardOp<DefaultDiscarder<JfrBuffer> > DiscardOperation;
@@ -383,14 +334,47 @@
   return discarder.elements();
 }
 
+size_t JfrCheckpointManager::write() {
+  const size_t processed = write_mspace<MutexedWriteOp, CompositeOperation>(_free_list_mspace, _chunkwriter);
+  synchronize_epoch();
+  return processed;
+}
+
+typedef MutexedWriteOp<WriteOperation> FlushOperation;
+
+size_t JfrCheckpointManager::flush() {
+  WriteOperation wo(_chunkwriter);
+  FlushOperation fo(wo);
+  assert(_free_list_mspace->is_full_empty(), "invariant");
+  process_free_list(fo, _free_list_mspace);
+  return wo.processed();
+}
+
 size_t JfrCheckpointManager::write_types() {
   ResourceMark rm;
   HandleMark hm;
-  JfrCheckpointWriter writer(false, true, Thread::current());
+  Thread* const t = Thread::current();
+  // The optimization here is to write the types directly into the epoch transition mspace
+  // because the caller will immediately serialize and reset this mspace.
+  JfrBuffer* const buffer = _epoch_transition_mspace->free_tail();
+  assert(buffer != NULL, "invariant");
+  buffer->acquire(t);
+  buffer->set_lease();
+  DEBUG_ONLY(assert_free_lease(buffer);)
+  JfrCheckpointWriter writer(t, buffer);
   JfrTypeManager::write_types(writer);
   return writer.used_size();
 }
 
+size_t JfrCheckpointManager::write_epoch_transition_mspace() {
+  return write_mspace<ExclusiveOp, CompositeOperation>(_epoch_transition_mspace, _chunkwriter);
+}
+
+size_t JfrCheckpointManager::write_constants() {
+  write_types();
+  return write_epoch_transition_mspace();
+}
+
 class JfrNotifyClosure : public ThreadClosure {
  public:
   void do_thread(Thread* t) {
@@ -424,9 +408,7 @@
 }
 
 size_t JfrCheckpointManager::flush_type_set() {
-  const size_t elements = JfrTypeManager::flush_type_set();
-  flush();
-  return elements;
+  return JfrTypeManager::flush_type_set();
 }
 
 void JfrCheckpointManager::create_thread_checkpoint(Thread* t) {
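
Note: write_mspace() above is now generic over both the writer host (MutexedWriteOp for the ordinary path, ExclusiveOp for the epoch transition) and the composite operation, so one body serves both serialization paths. A toy example of this template-template technique, with all names hypothetical:

    template <typename Op>
    struct LockedHost {                 // plays the role of MutexedWriteOp
      Op& _op;
      explicit LockedHost(Op& op) : _op(op) {}
      bool process(int* item) { return _op.write(item); }
    };

    template <typename A, typename B>
    struct Sequence {                   // plays the role of CompositeOperation
      A* _a; B* _b;
      Sequence(A* a, B* b) : _a(a), _b(b) {}
      bool process(int* item) { return _a->process(item) && _b->process(item); }
    };

    struct Writer  { bool write(int* item)   { return item != 0; } };
    struct Release { bool process(int* item) { (void)item; return true; } };

    template <template <typename> class Host,
              template <typename, typename> class Composite>
    bool write_all(int* item) {
      Writer w;
      Host<Writer> host(w);
      Release r;
      Composite<Host<Writer>, Release> seq(&host, &r);
      return seq.process(item);
    }

    // write_all<LockedHost, Sequence>(&value);
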
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -30,12 +30,24 @@
 JfrCheckpointFlush::JfrCheckpointFlush(Type* old, size_t used, size_t requested, Thread* t) :
   _result(JfrCheckpointManager::flush(old, used, requested, t)) {}
 
-JfrCheckpointWriter::JfrCheckpointWriter(bool flushpoint, bool header, Thread* thread) :
-  JfrCheckpointWriterBase(JfrCheckpointManager::lease_buffer(thread), thread),
+JfrCheckpointWriter::JfrCheckpointWriter() :
+  JfrCheckpointWriterBase(JfrCheckpointManager::lease_buffer(Thread::current()), Thread::current()),
   _time(JfrTicks::now()),
   _offset(0),
   _count(0),
-  _flushpoint(flushpoint),
+  _header(true) {
+  assert(this->is_acquired(), "invariant");
+  assert(0 == this->current_offset(), "invariant");
+  if (_header) {
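+    // reserve space for the checkpoint header; it is patched in on release()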
+    reserve(sizeof(JfrCheckpointEntry));
+  }
+}
+
+JfrCheckpointWriter::JfrCheckpointWriter(Thread* t, bool header /* true */) :
+  JfrCheckpointWriterBase(JfrCheckpointManager::lease_buffer(t), t),
+  _time(JfrTicks::now()),
+  _offset(0),
+  _count(0),
   _header(header) {
   assert(this->is_acquired(), "invariant");
   assert(0 == this->current_offset(), "invariant");
@@ -44,13 +56,26 @@
   }
 }
 
-static void write_checkpoint_header(u1* pos, int64_t size, jlong time, bool flushpoint, u4 type_count) {
+JfrCheckpointWriter::JfrCheckpointWriter(Thread* t, JfrBuffer* buffer) :
+  JfrCheckpointWriterBase(buffer, t),
+  _time(JfrTicks::now()),
+  _offset(0),
+  _count(0),
+  _header(true) {
+  assert(this->is_acquired(), "invariant");
+  assert(0 == this->current_offset(), "invariant");
+  if (_header) {
+    reserve(sizeof(JfrCheckpointEntry));
+  }
+}
+
+static void write_checkpoint_header(u1* pos, int64_t size, jlong time, u4 type_count) {
   assert(pos != NULL, "invariant");
   JfrBigEndianWriter be_writer(pos, sizeof(JfrCheckpointEntry));
   be_writer.write(size);
   be_writer.write(time);
   be_writer.write(JfrTicks::now().value() - time);
-  be_writer.write(flushpoint ? (u4)1 : (u4)0);
+  be_writer.write((u4)0); // not a flushpoint
   be_writer.write(type_count);
   assert(be_writer.is_valid(), "invariant");
 }
@@ -73,18 +98,10 @@
   assert(this->used_size() > sizeof(JfrCheckpointEntry), "invariant");
   const int64_t size = this->current_offset();
   assert(size + this->start_pos() == this->current_pos(), "invariant");
-  write_checkpoint_header(const_cast<u1*>(this->start_pos()), size, _time, is_flushpoint(), count());
+  write_checkpoint_header(const_cast<u1*>(this->start_pos()), size, _time, count());
   release();
 }
 
-void JfrCheckpointWriter::set_flushpoint(bool flushpoint) {
-  _flushpoint = flushpoint;
-}
-
-bool JfrCheckpointWriter::is_flushpoint() const {
-  return _flushpoint;
-}
-
 u4 JfrCheckpointWriter::count() const {
   return _count;
 }
@@ -126,7 +143,7 @@
   write_padded_at_offset(nof_entries, offset);
 }
 
-const u1* JfrCheckpointWriter::session_data(size_t* size, const JfrCheckpointContext* ctx /* 0 */) {
+const u1* JfrCheckpointWriter::session_data(size_t* size, bool move /* false */, const JfrCheckpointContext* ctx /* 0 */) {
   assert(this->is_acquired(), "wrong state!");
   if (!this->is_valid()) {
     *size = 0;
@@ -139,9 +156,11 @@
   }
   *size = this->used_size();
   assert(this->start_pos() + *size == this->current_pos(), "invariant");
-  write_checkpoint_header(const_cast<u1*>(this->start_pos()), this->used_offset(), _time, is_flushpoint(), count());
-  this->seek(_offset + (_header ? sizeof(JfrCheckpointEntry) : 0));
-  set_count(0);
+  write_checkpoint_header(const_cast<u1*>(this->start_pos()), this->used_offset(), _time, count());
+  _header = false; // the header is already written
+  if (move) {
+    this->seek(_offset);
+  }
   return this->start_pos();
 }
 
@@ -156,31 +175,23 @@
   this->seek(ctx.offset);
   set_count(ctx.count);
 }
-
 bool JfrCheckpointWriter::has_data() const {
   return this->used_size() > sizeof(JfrCheckpointEntry);
 }
 
-JfrCheckpointBlobHandle JfrCheckpointWriter::checkpoint_blob() {
+JfrCheckpointBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
   size_t size = 0;
-  const u1* data = session_data(&size);
-  return JfrCheckpointBlob::make(data, size);
-}
-
-JfrCheckpointBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
-  if (ctx == NULL) {
-    return checkpoint_blob();
-  }
-  size_t size = 0;
-  const u1* data = session_data(&size, ctx);
+  const u1* data = session_data(&size, false, ctx);
   return JfrCheckpointBlob::make(data, size);
 }
 
 JfrCheckpointBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) {
-  JfrCheckpointBlobHandle data = copy(ctx);
+  size_t size = 0;
+  const u1* data = session_data(&size, true, ctx);
+  JfrCheckpointBlobHandle blob = JfrCheckpointBlob::make(data, size);
   if (ctx != NULL) {
     const_cast<JfrCheckpointContext*>(ctx)->count = 0;
     set_context(*ctx);
   }
-  return data;
+  return blob;
 }
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -54,24 +54,23 @@
 };
 
 class JfrCheckpointWriter : public JfrCheckpointWriterBase {
+  friend class JfrCheckpointManager;
   friend class JfrSerializerRegistration;
  private:
   JfrTicks _time;
   int64_t _offset;
   u4 _count;
-  bool _flushpoint;
   bool _header;
 
   u4 count() const;
   void set_count(u4 count);
   void increment();
-  void set_flushpoint(bool flushpoint);
-  bool is_flushpoint() const;
-  const u1* session_data(size_t* size, const JfrCheckpointContext* ctx = NULL);
+  const u1* session_data(size_t* size, bool move = false, const JfrCheckpointContext* ctx = NULL);
   void release();
-
+  JfrCheckpointWriter(Thread* t, JfrBuffer* buffer);
  public:
-  JfrCheckpointWriter(bool flushpoint, bool header, Thread* thread);
+  JfrCheckpointWriter();
+  JfrCheckpointWriter(Thread* t, bool header = true);
   ~JfrCheckpointWriter();
   void write_type(JfrTypeId type_id);
   void write_count(u4 nof_entries);
@@ -80,7 +79,6 @@
   const JfrCheckpointContext context() const;
   void set_context(const JfrCheckpointContext ctx);
   bool has_data() const;
-  JfrCheckpointBlobHandle checkpoint_blob();
   JfrCheckpointBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
   JfrCheckpointBlobHandle move(const JfrCheckpointContext* ctx = NULL);
 };
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -305,10 +305,10 @@
   bool _flushpoint;
  public:
   explicit TypeSetSerialization(bool class_unload, bool flushpoint) : _elements(0), _class_unload(class_unload), _flushpoint(flushpoint) {}
-  void write(JfrCheckpointWriter& writer, JfrCheckpointWriter* leakp_writer) {
+  void write(JfrCheckpointWriter& writer) {
     MutexLocker cld_lock(SafepointSynchronize::is_at_safepoint() ? NULL : ClassLoaderDataGraph_lock);
     MutexLocker lock(SafepointSynchronize::is_at_safepoint() ? NULL : Module_lock);
-    _elements = JfrTypeSet::serialize(&writer, leakp_writer, _class_unload, _flushpoint);
+    _elements = JfrTypeSet::serialize(&writer, _class_unload, _flushpoint);
   }
   size_t elements() const {
     return _elements;
@@ -317,19 +317,17 @@
 
 void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
   TypeSetSerialization type_set(true, false);
+  type_set.write(writer);
   if (LeakProfiler::is_running()) {
-    JfrCheckpointWriter leakp_writer(false, true, Thread::current());
-    type_set.write(writer, &leakp_writer);
-    ObjectSampleCheckpoint::install(leakp_writer, true, true);
+    ObjectSampleCheckpoint::on_type_set_unload(writer);
     return;
   }
-  type_set.write(writer, NULL);
 };
 
 void FlushTypeSet::serialize(JfrCheckpointWriter& writer) {
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   TypeSetSerialization type_set(false, true);
-  type_set.write(writer, NULL);
+  type_set.write(writer);
   _elements = type_set.elements();
 }
 
@@ -339,13 +337,7 @@
 
 void TypeSet::serialize(JfrCheckpointWriter& writer) {
   TypeSetSerialization type_set(false, false);
-  if (LeakProfiler::is_suspended()) {
-    JfrCheckpointWriter leakp_writer(false, true, Thread::current());
-    type_set.write(writer, &leakp_writer);
-    ObjectSampleCheckpoint::install(leakp_writer, false, true);
-    return;
-  }
-  type_set.write(writer, NULL);
+  type_set.write(writer);
 };
 
 void ThreadStateConstant::serialize(JfrCheckpointWriter& writer) {
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -99,7 +99,7 @@
   }
 }
 
-void  JfrSerializerRegistration::on_rotation() const {
+void JfrSerializerRegistration::on_rotation() const {
   _serializer->on_rotation();
 }
 
@@ -147,23 +147,27 @@
 }
 
 void JfrTypeManager::write_type_set() {
-  // can safepoint here because of Module_lock
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
-  JfrCheckpointWriter writer(true, true, Thread::current());
+  JfrCheckpointWriter writer;
   TypeSet set;
   set.serialize(writer);
 }
 
 void JfrTypeManager::write_type_set_for_unloaded_classes() {
   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  JfrCheckpointWriter writer(false, true, Thread::current());
+  JfrCheckpointWriter writer;
+  const JfrCheckpointContext ctx = writer.context();
   ClassUnloadTypeSet class_unload_set;
   class_unload_set.serialize(writer);
+  if (!Jfr::is_recording()) {
+    // discard anything written
+    writer.set_context(ctx);
+  }
 }
 
 size_t JfrTypeManager::flush_type_set() {
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
-  JfrCheckpointWriter writer(true, true, Thread::current());
+  JfrCheckpointWriter writer;
   FlushTypeSet flush;
   flush.serialize(writer);
   return flush.elements();
@@ -172,18 +176,18 @@
 void JfrTypeManager::create_thread_checkpoint(Thread* t) {
   assert(t != NULL, "invariant");
   JfrThreadConstant type_thread(t);
-  JfrCheckpointWriter writer(false, true, t);
+  JfrCheckpointWriter writer(t);
   writer.write_type(TYPE_THREAD);
   type_thread.serialize(writer);
   // create and install a checkpoint blob
-  t->jfr_thread_local()->set_thread_checkpoint(writer.checkpoint_blob());
+  t->jfr_thread_local()->set_thread_checkpoint(writer.move());
   assert(t->jfr_thread_local()->has_thread_checkpoint(), "invariant");
 }
 
 void JfrTypeManager::write_thread_checkpoint(Thread* t) {
   assert(t != NULL, "invariant");
   JfrThreadConstant type_thread(t);
-  JfrCheckpointWriter writer(false, true, t);
+  JfrCheckpointWriter writer(t);
   writer.write_type(TYPE_THREAD);
   type_thread.serialize(writer);
 }
@@ -208,7 +212,7 @@
   assert(!types.in_list(registration), "invariant");
   DEBUG_ONLY(assert_not_registered_twice(id, types);)
   if (Jfr::is_recording()) {
-    JfrCheckpointWriter writer(false, true, Thread::current());
+    JfrCheckpointWriter writer;
     registration->invoke(writer);
   }
   types.prepend(registration);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,32 +28,22 @@
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
 #include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
 #include "jfr/jfr.hpp"
 #include "jfr/jni/jfrGetAllEventClasses.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
-#include "jfr/recorder/storage/jfrBuffer.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/writers/jfrTypeWriterHost.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
-#include "memory/resourceArea.hpp"
 #include "utilities/accessFlags.hpp"
 
-// incremented on each checkpoint
-static u8 checkpoint_id = 0;
-
-// creates a unique id by combining a checkpoint relative symbol id (2^24)
-// with the current checkpoint id (2^40)
-#define CREATE_SYMBOL_ID(sym_id) (((u8)((checkpoint_id << 24) | sym_id)))
-
 typedef const Klass* KlassPtr;
 typedef const PackageEntry* PkgPtr;
 typedef const ModuleEntry* ModPtr;
@@ -63,57 +53,92 @@
 typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
 typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
 
-static traceid module_id(PkgPtr pkg) {
-  assert(pkg != NULL, "invariant");
-  ModPtr module_entry = pkg->module();
-  return module_entry != NULL && module_entry->is_named() ? TRACE_ID(module_entry) : 0;
+// incremented on each rotation
+static u8 checkpoint_id = 1;
+
+// creates a unique id by combining a checkpoint relative symbol id (2^24)
+// with the current checkpoint id (2^40)
+#define CREATE_SYMBOL_ID(sym_id) (((u8)((checkpoint_id << 24) | sym_id)))
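+// e.g. checkpoint_id == 1, sym_id == 5 -> (1 << 24) | 5 == 0x1000005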
+
+static traceid create_symbol_id(traceid artifact_id) {
+  return artifact_id != 0 ? CREATE_SYMBOL_ID(artifact_id) : 0;
+}
+
+static JfrCheckpointWriter* _writer = NULL;
+static bool _class_unload = false;
+static bool _flushpoint = false;
+static JfrArtifactSet* _artifacts = NULL;
+static JfrArtifactClosure* _subsystem_callback = NULL;
+
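+// Class unloading and flushpoints serialize artifacts tagged in the epoch still in progress;
+// chunk rotation serializes artifacts tagged in the previous epoch.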
+static bool current_epoch() {
+  return _class_unload || _flushpoint;
+}
+
+static bool previous_epoch() {
+  return !current_epoch();
+}
+
+static bool is_complete() {
+  return !_artifacts->has_klass_entries() && current_epoch();
+}
+
+static traceid mark_symbol(KlassPtr klass) {
+  return klass != NULL ? create_symbol_id(_artifacts->mark(klass)) : 0;
+}
+
+static traceid mark_symbol(Symbol* symbol) {
+  return symbol != NULL ? create_symbol_id(_artifacts->mark(symbol)) : 0;
+}
+
+template <typename T>
+static traceid artifact_id(const T* ptr) {
+  assert(ptr != NULL, "invariant");
+  return TRACE_ID(ptr);
 }
 
 static traceid package_id(KlassPtr klass) {
   assert(klass != NULL, "invariant");
   PkgPtr pkg_entry = klass->package();
-  return pkg_entry == NULL ? 0 : TRACE_ID(pkg_entry);
+  return pkg_entry != NULL ? artifact_id(pkg_entry) : 0;
+}
+
+static traceid module_id(PkgPtr pkg) {
+  assert(pkg != NULL, "invariant");
+  ModPtr module_entry = pkg->module();
+  if (module_entry != NULL && module_entry->is_named()) {
+    SET_TRANSIENT(module_entry);
+    return artifact_id(module_entry);
+  }
+  return 0;
+}
+
+static traceid method_id(KlassPtr klass, MethodPtr method) {
+  assert(klass != NULL, "invariant");
+  assert(method != NULL, "invariant");
+  return METHOD_ID(klass, method);
 }
 
 static traceid cld_id(CldPtr cld) {
   assert(cld != NULL, "invariant");
-  return cld->is_unsafe_anonymous() ? 0 : TRACE_ID(cld);
+  if (cld->is_unsafe_anonymous()) {
+    return 0;
+  }
+  SET_TRANSIENT(cld);
+  return artifact_id(cld);
 }
 
-static void tag_leakp_klass_artifacts(KlassPtr k, bool current_epoch) {
-  assert(k != NULL, "invariant");
-  PkgPtr pkg = k->package();
-  if (pkg != NULL) {
-    tag_leakp_artifact(pkg, current_epoch);
-    ModPtr module = pkg->module();
-    if (module != NULL) {
-      tag_leakp_artifact(module, current_epoch);
-    }
-  }
-  CldPtr cld = k->class_loader_data();
-  assert(cld != NULL, "invariant");
-  if (!cld->is_unsafe_anonymous()) {
-    tag_leakp_artifact(cld, current_epoch);
-  }
+template <typename T>
+static s4 get_flags(const T* ptr) {
+  assert(ptr != NULL, "invariant");
+  return ptr->access_flags().get_flags();
 }
 
-class TagLeakpKlassArtifact {
-  bool _current_epoch;
- public:
-  TagLeakpKlassArtifact(bool current_epoch) : _current_epoch(current_epoch) {}
-  bool operator()(KlassPtr klass) {
-    if (_current_epoch) {
-      if (LEAKP_USED_THIS_EPOCH(klass)) {
-        tag_leakp_klass_artifacts(klass, _current_epoch);
-      }
-    } else {
-      if (LEAKP_USED_PREV_EPOCH(klass)) {
-        tag_leakp_klass_artifacts(klass, _current_epoch);
-      }
-    }
-    return true;
-  }
-};
+template <typename T>
+static void set_serialized(const T* ptr) {
+  assert(ptr != NULL, "invariant");
+  SET_SERIALIZED(ptr);
+  assert(IS_SERIALIZED(ptr), "invariant");
+}
 
 /*
  * In C++03, functions used as template parameters must have external linkage;
@@ -123,10 +148,11 @@
  * The weird naming is an effort to decrease the risk of name clashes.
  */
 
-int write__artifact__klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, KlassPtr klass) {
+int write__klass(JfrCheckpointWriter* writer, const void* k) {
   assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  assert(klass != NULL, "invariant");
+  assert(_artifacts != NULL, "invariant");
+  assert(k != NULL, "invariant");
+  KlassPtr klass = (KlassPtr)k;
   traceid pkg_id = 0;
   KlassPtr theklass = klass;
   if (theklass->is_objArray_klass()) {
@@ -138,542 +164,118 @@
   } else {
     assert(theklass->is_typeArray_klass(), "invariant");
   }
-  const traceid symbol_id = artifacts->mark(klass);
-  assert(symbol_id > 0, "need to have an address for symbol!");
-  writer->write(TRACE_ID(klass));
+  writer->write(artifact_id(klass));
   writer->write(cld_id(klass->class_loader_data()));
-  writer->write((traceid)CREATE_SYMBOL_ID(symbol_id));
+  writer->write(mark_symbol(klass));
   writer->write(pkg_id);
-  writer->write((s4)klass->access_flags().get_flags());
-  return 1;
-}
-
-int write__artifact__klass__leakp(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
-  assert(k != NULL, "invariant");
-  KlassPtr klass = (KlassPtr)k;
-  return write__artifact__klass(writer, artifacts, klass);
-}
-
-int write__artifact__klass__serialize(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
-  assert(k != NULL, "invariant");
-  KlassPtr klass = (KlassPtr)k;
-  int result = write__artifact__klass(writer, artifacts, klass);
-  if (IS_NOT_SERIALIZED(klass)) {
-    SET_SERIALIZED(klass);
-  }
-  assert(IS_SERIALIZED(klass), "invariant");
-  return result;
-}
-
-typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
-typedef SerializePredicate<KlassPtr> KlassPredicate;
-typedef JfrPredicatedArtifactWriterImplHost<KlassPtr, LeakKlassPredicate, write__artifact__klass__leakp> LeakKlassWriterImpl;
-typedef JfrArtifactWriterHost<LeakKlassWriterImpl, TYPE_CLASS> LeakKlassWriter;
-typedef JfrPredicatedArtifactWriterImplHost<KlassPtr, KlassPredicate, write__artifact__klass__serialize> KlassWriterImpl;
-typedef JfrArtifactWriterHost<KlassWriterImpl, TYPE_CLASS> KlassWriter;
-
-int write__artifact__method(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, MethodPtr method) {
-  assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  const traceid method_name_symbol_id = artifacts->mark(method->name());
-  assert(method_name_symbol_id > 0, "invariant");
-  const traceid method_sig_symbol_id = artifacts->mark(method->signature());
-  assert(method_sig_symbol_id > 0, "invariant");
-  KlassPtr klass = method->method_holder();
-  assert(klass != NULL, "invariant");
-  assert(METHOD_USED_ANY_EPOCH(klass), "invariant");
-  writer->write((u8)METHOD_ID(klass, method));
-  writer->write((u8)TRACE_ID(klass));
-  writer->write((u8)CREATE_SYMBOL_ID(method_name_symbol_id));
-  writer->write((u8)CREATE_SYMBOL_ID(method_sig_symbol_id));
-  writer->write((u2)method->access_flags().get_flags());
-  writer->write(const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0);
-  return 1;
-}
-
-int write__artifact__method__leakp(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
-  assert(m != NULL, "invariant");
-  MethodPtr method = (MethodPtr)m;
-  return write__artifact__method(writer, artifacts, method);
-}
-
-int write__artifact__method__serialize(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
-  assert(m != NULL, "invariant");
-  MethodPtr method = (MethodPtr)m;
-  int result = write__artifact__method(writer, artifacts, method);
-  if (METHOD_NOT_SERIALIZED(method)) {
-    SET_METHOD_SERIALIZED(method);
-  }
-  assert(IS_METHOD_SERIALIZED(method), "invariant");
-  return result;
-}
-
-typedef JfrArtifactWriterImplHost<MethodPtr, write__artifact__method__leakp> LeakpMethodWriterImplTarget;
-typedef JfrArtifactWriterHost<LeakpMethodWriterImplTarget, TYPE_METHOD> LeakpMethodWriterImpl;
-typedef SerializePredicate<MethodPtr> MethodPredicate;
-typedef JfrPredicatedArtifactWriterImplHost<MethodPtr, MethodPredicate, write__artifact__method__serialize> MethodWriterImplTarget;
-typedef JfrArtifactWriterHost<MethodWriterImplTarget, TYPE_METHOD> MethodWriterImpl;
-
-int write__artifact__package(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, PkgPtr pkg) {
-  assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  assert(pkg != NULL, "invariant");
-  Symbol* const pkg_name = pkg->name();
-  const traceid package_name_symbol_id = pkg_name != NULL ? artifacts->mark(pkg_name) : 0;
-  assert(package_name_symbol_id > 0, "invariant");
-  writer->write((traceid)TRACE_ID(pkg));
-  writer->write((traceid)CREATE_SYMBOL_ID(package_name_symbol_id));
-  writer->write(module_id(pkg));
-  writer->write((bool)pkg->is_exported());
-  return 1;
-}
-
-int write__artifact__package__leakp(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* p) {
-  assert(p != NULL, "invariant");
-  PkgPtr pkg = (PkgPtr)p;
-  return write__artifact__package(writer, artifacts, pkg);
-}
-
-int write__artifact__package__serialize(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* p) {
-  assert(p != NULL, "invariant");
-  PkgPtr pkg = (PkgPtr)p;
-  int result = write__artifact__package(writer, artifacts, pkg);
-  if (IS_NOT_SERIALIZED(pkg)) {
-    SET_SERIALIZED(pkg);
-  }
-  assert(IS_SERIALIZED(pkg), "invariant");
-  return result;
-}
-
-typedef LeakPredicate<PkgPtr> LeakPackagePredicate;
-//int _compare_pkg_ptr_(PkgPtr const& lhs, PkgPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
-//typedef UniquePredicate<PkgPtr, _compare_pkg_ptr_> PackagePredicate;
-typedef SerializePredicate<PkgPtr> PackagePredicate;
-typedef JfrPredicatedArtifactWriterImplHost<PkgPtr, LeakPackagePredicate, write__artifact__package__leakp> LeakPackageWriterImpl;
-typedef JfrPredicatedArtifactWriterImplHost<PkgPtr, PackagePredicate, write__artifact__package__serialize> PackageWriterImpl;
-typedef JfrArtifactWriterHost<LeakPackageWriterImpl, TYPE_PACKAGE> LeakPackageWriter;
-typedef JfrArtifactWriterHost<PackageWriterImpl, TYPE_PACKAGE> PackageWriter;
-
-int write__artifact__module(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, ModPtr entry) {
-  assert(entry != NULL, "invariant");
-  Symbol* const module_name = entry->name();
-  const traceid module_name_symbol_id = module_name != NULL ? artifacts->mark(module_name) : 0;
-  Symbol* const module_version = entry->version();
-  const traceid module_version_symbol_id = module_version != NULL ? artifacts->mark(module_version) : 0;
-  Symbol* const module_location = entry->location();
-  const traceid module_location_symbol_id = module_location != NULL ? artifacts->mark(module_location) : 0;
-  writer->write((traceid)TRACE_ID(entry));
-  writer->write(module_name_symbol_id == 0 ? (traceid)0 : (traceid)CREATE_SYMBOL_ID(module_name_symbol_id));
-  writer->write(module_version_symbol_id == 0 ? (traceid)0 : (traceid)CREATE_SYMBOL_ID(module_version_symbol_id));
-  writer->write(module_location_symbol_id == 0 ? (traceid)0 : (traceid)CREATE_SYMBOL_ID(module_location_symbol_id));
-  writer->write(cld_id(entry->loader_data()));
-  return 1;
-}
-
-int write__artifact__module__leakp(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
-  assert(m != NULL, "invariant");
-  ModPtr entry = (ModPtr)m;
-  return write__artifact__module(writer, artifacts, entry);
-}
-
-int write__artifact__module__serialize(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
-  assert(m != NULL, "invariant");
-  ModPtr entry = (ModPtr)m;
-  int result = write__artifact__module(writer, artifacts, entry);
-  CldPtr cld = entry->loader_data();
-  assert(cld != NULL, "invariant");
-  if (IS_NOT_SERIALIZED(cld)) {
-    if (!cld->is_unsafe_anonymous()) {
-      SET_USED_PREV_EPOCH(cld);
-    }
-  }
-  if (IS_NOT_SERIALIZED(entry)) {
-    SET_SERIALIZED(entry);
-  }
-  assert(IS_SERIALIZED(entry), "invariant");
-  return result;
-}
-
-typedef LeakPredicate<ModPtr> LeakModulePredicate;
-//int _compare_mod_ptr_(ModPtr const& lhs, ModPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
-//typedef UniquePredicate<ModPtr, _compare_mod_ptr_> ModulePredicate;
-typedef SerializePredicate<ModPtr> ModulePredicate;
-typedef JfrPredicatedArtifactWriterImplHost<ModPtr, LeakModulePredicate, write__artifact__module__leakp> LeakModuleWriterImpl;
-typedef JfrPredicatedArtifactWriterImplHost<ModPtr, ModulePredicate, write__artifact__module__serialize> ModuleWriterImpl;
-typedef JfrArtifactWriterHost<LeakModuleWriterImpl, TYPE_MODULE> LeakModuleWriter;
-typedef JfrArtifactWriterHost<ModuleWriterImpl, TYPE_MODULE> ModuleWriter;
-
-int write__artifact__classloader(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, CldPtr cld) {
-  assert(cld != NULL, "invariant");
-  assert(!cld->is_unsafe_anonymous(), "invariant");
-  const traceid cld_id = TRACE_ID(cld);
-  // class loader type
-  const Klass* class_loader_klass = cld->class_loader_klass();
-  if (class_loader_klass == NULL) {
-    // (primordial) boot class loader
-    writer->write(cld_id); // class loader instance id
-    writer->write((traceid)0);  // class loader type id (absence of)
-    writer->write((traceid)CREATE_SYMBOL_ID(1)); // 1 maps to synthetic name -> "bootstrap"
-  } else {
-    Symbol* symbol_name = cld->name();
-    const traceid symbol_name_id = symbol_name != NULL ? artifacts->mark(symbol_name) : 0;
-    writer->write(cld_id); // class loader instance id
-    writer->write(TRACE_ID(class_loader_klass)); // class loader type id
-    writer->write(symbol_name_id == 0 ? (traceid)0 :
-      (traceid)CREATE_SYMBOL_ID(symbol_name_id)); // class loader instance name
-  }
-  return 1;
-}
-
-int write__artifact__classloader__leakp(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* c) {
-  assert(c != NULL, "invariant");
-  CldPtr cld = (CldPtr)c;
-  int result = write__artifact__classloader(writer, artifacts, cld);
-  if (IS_NOT_LEAKP_SERIALIZED(cld)) {
-    SET_LEAKP_SERIALIZED(cld);
-  }
-  assert(IS_LEAKP_SERIALIZED(cld), "invariant");
-  return result;
-}
-
-int write__artifact__classloader__serialize(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* c) {
-  assert(c != NULL, "invariant");
-  CldPtr cld = (CldPtr)c;
-  int result = write__artifact__classloader(writer, artifacts, cld);
-  if (IS_NOT_SERIALIZED(cld)) {
-    SET_SERIALIZED(cld);
-  }
-  assert(IS_SERIALIZED(cld), "invariant");
-  return result;
-}
-
-typedef LeakSerializePredicate<CldPtr> LeakCldPredicate;
-//int _compare_cld_ptr_(CldPtr const& lhs, CldPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
-//typedef UniquePredicate<CldPtr, _compare_cld_ptr_> CldPredicate;
-typedef SerializePredicate<CldPtr> CldPredicate;
-typedef JfrPredicatedArtifactWriterImplHost<CldPtr, LeakCldPredicate, write__artifact__classloader__leakp> LeakCldWriterImpl;
-typedef JfrPredicatedArtifactWriterImplHost<CldPtr, CldPredicate, write__artifact__classloader__serialize> CldWriterImpl;
-typedef JfrArtifactWriterHost<LeakCldWriterImpl, TYPE_CLASSLOADER> LeakCldWriter;
-typedef JfrArtifactWriterHost<CldWriterImpl, TYPE_CLASSLOADER> CldWriter;
-
-typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
-
-static int write__artifact__symbol__entry__(JfrCheckpointWriter* writer, SymbolEntryPtr entry) {
-  assert(writer != NULL, "invariant");
-  assert(entry != NULL, "invariant");
-  ResourceMark rm;
-  writer->write(CREATE_SYMBOL_ID(entry->id()));
-  writer->write(entry->value()->as_C_string());
-  return 1;
-}
-
-int write__artifact__symbol__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
-  assert(e != NULL, "invariant");
-  return write__artifact__symbol__entry__(writer, (SymbolEntryPtr)e);
-}
-
-typedef JfrArtifactWriterImplHost<SymbolEntryPtr, write__artifact__symbol__entry> SymbolEntryWriterImpl;
-typedef JfrArtifactWriterHost<SymbolEntryWriterImpl, TYPE_SYMBOL> SymbolEntryWriter;
-
-typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
-
-static int write__artifact__cstring__entry__(JfrCheckpointWriter* writer, CStringEntryPtr entry) {
-  assert(writer != NULL, "invariant");
-  assert(entry != NULL, "invariant");
-  writer->write(CREATE_SYMBOL_ID(entry->id()));
-  writer->write(entry->value());
+  writer->write(get_flags(klass));
+  set_serialized(klass);
   return 1;
 }
 
-int write__artifact__cstring__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
-  assert(e != NULL, "invariant");
-  return write__artifact__cstring__entry__(writer, (CStringEntryPtr)e);
+static void do_implied(Klass* klass) {
+  assert(klass != NULL, "invariant");
+  if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
+    _subsystem_callback->do_artifact(klass);
+  }
 }
 
-typedef JfrArtifactWriterImplHost<CStringEntryPtr, write__artifact__cstring__entry> CStringEntryWriterImpl;
-typedef JfrArtifactWriterHost<CStringEntryWriterImpl, TYPE_SYMBOL> CStringEntryWriter;
-
-int write__artifact__klass__symbol(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
-  assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invaiant");
-  assert(k != NULL, "invariant");
-  const InstanceKlass* const ik = (const InstanceKlass*)k;
-  if (ik->is_unsafe_anonymous()) {
-    CStringEntryPtr entry =
-      artifacts->map_cstring(JfrSymbolId::unsafe_anonymous_klass_name_hash_code(ik));
-    assert(entry != NULL, "invariant");
-    return write__artifact__cstring__entry__(writer, entry);
+static void do_unloaded_klass(Klass* klass) {
+  assert(klass != NULL, "invariant");
+  assert(_subsystem_callback != NULL, "invariant");
+  if (IS_JDK_JFR_EVENT_SUBKLASS(klass)) {
+    JfrEventClasses::increment_unloaded_event_class();
   }
-
-  SymbolEntryPtr entry = artifacts->map_symbol(JfrSymbolId::regular_klass_name_hash_code(ik));
-  return write__artifact__symbol__entry__(writer, entry);
-}
-
-int _compare_traceid_(const traceid& lhs, const traceid& rhs) {
-  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
+  if (USED_THIS_EPOCH(klass)) {
+    ObjectSampleCheckpoint::on_klass_unload(klass);
+    _subsystem_callback->do_artifact(klass);
+    return;
+  }
+  do_implied(klass);
 }
 
-template <template <typename> class Predicate>
-class KlassSymbolWriterImpl {
- private:
-  JfrCheckpointWriter* _writer;
-  JfrArtifactSet* _artifacts;
-  Predicate<KlassPtr> _predicate;
-  MethodUsedPredicate<true> _method_used_predicate;
-  MethodFlagPredicate _method_flag_predicate;
-  UniquePredicate<traceid, _compare_traceid_> _unique_predicate;
-
-  int klass_symbols(KlassPtr klass);
-  int package_symbols(PkgPtr pkg);
-  int module_symbols(ModPtr module);
-  int class_loader_symbols(CldPtr cld);
-  int method_symbols(KlassPtr klass);
-
- public:
-  typedef KlassPtr Type;
-  KlassSymbolWriterImpl(JfrCheckpointWriter* writer,
-                        JfrArtifactSet* artifacts,
-                        bool current_epoch) : _writer(writer),
-                                             _artifacts(artifacts),
-                                             _predicate(current_epoch),
-                                             _method_used_predicate(current_epoch),
-                                             _method_flag_predicate(current_epoch),
-                                             _unique_predicate(current_epoch) {}
-
-  int operator()(KlassPtr klass) {
-    assert(klass != NULL, "invariant");
-    int count = 0;
-    if (_predicate(klass)) {
-      count += klass_symbols(klass);
-      PkgPtr pkg = klass->package();
-      if (pkg != NULL) {
-        count += package_symbols(pkg);
-        ModPtr module = pkg->module();
-        if (module != NULL && module->is_named()) {
-          count += module_symbols(module);
-        }
-      }
-      CldPtr cld = klass->class_loader_data();
-      assert(cld != NULL, "invariant");
-      if (!cld->is_unsafe_anonymous()) {
-        count += class_loader_symbols(cld);
-      }
-      if (_method_used_predicate(klass)) {
-        count += method_symbols(klass);
-      }
+static void do_klass(Klass* klass) {
+  assert(klass != NULL, "invariant");
+  assert(_subsystem_callback != NULL, "invariant");
+  if (_flushpoint) {
+    if (USED_THIS_EPOCH(klass)) {
+      _subsystem_callback->do_artifact(klass);
+      return;
     }
-    return count;
+  } else {
+    if (USED_PREV_EPOCH(klass)) {
+      _subsystem_callback->do_artifact(klass);
+      return;
+    }
   }
-};
-
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::klass_symbols(KlassPtr klass) {
-  assert(klass != NULL, "invariant");
-  assert(_predicate(klass), "invariant");
-  const InstanceKlass* const ik = (const InstanceKlass*)klass;
-  if (ik->is_unsafe_anonymous()) {
-    CStringEntryPtr entry =
-      this->_artifacts->map_cstring(JfrSymbolId::unsafe_anonymous_klass_name_hash_code(ik));
-    assert(entry != NULL, "invariant");
-    return _unique_predicate(entry->id()) ? write__artifact__cstring__entry__(this->_writer, entry) : 0;
-  }
-  SymbolEntryPtr entry = this->_artifacts->map_symbol(ik->name());
-  assert(entry != NULL, "invariant");
-  return _unique_predicate(entry->id()) ? write__artifact__symbol__entry__(this->_writer, entry) : 0;
+  do_implied(klass);
 }
 
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::package_symbols(PkgPtr pkg) {
-  assert(pkg != NULL, "invariant");
-  SymbolPtr pkg_name = pkg->name();
-  assert(pkg_name != NULL, "invariant");
-  SymbolEntryPtr package_symbol = this->_artifacts->map_symbol(pkg_name);
-  assert(package_symbol != NULL, "invariant");
-  return _unique_predicate(package_symbol->id()) ? write__artifact__symbol__entry__(this->_writer, package_symbol) : 0;
-}
-
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::module_symbols(ModPtr module) {
-  assert(module != NULL, "invariant");
-  assert(module->is_named(), "invariant");
-  int count = 0;
-  SymbolPtr sym = module->name();
-  SymbolEntryPtr entry = NULL;
-  if (sym != NULL) {
-    entry = this->_artifacts->map_symbol(sym);
-    assert(entry != NULL, "invariant");
-    if (_unique_predicate(entry->id())) {
-      count += write__artifact__symbol__entry__(this->_writer, entry);
-    }
+static void do_klasses() {
+  if (_class_unload) {
+    ClassLoaderDataGraph::classes_unloading_do(&do_unloaded_klass);
+    return;
   }
-  sym = module->version();
-  if (sym != NULL) {
-    entry = this->_artifacts->map_symbol(sym);
-    assert(entry != NULL, "invariant");
-    if (_unique_predicate(entry->id())) {
-      count += write__artifact__symbol__entry__(this->_writer, entry);
-    }
-  }
-  sym = module->location();
-  if (sym != NULL) {
-    entry = this->_artifacts->map_symbol(sym);
-    assert(entry != NULL, "invariant");
-    if (_unique_predicate(entry->id())) {
-      count += write__artifact__symbol__entry__(this->_writer, entry);
-    }
-  }
-  return count;
+  ClassLoaderDataGraph::classes_do(&do_klass);
 }
 
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::class_loader_symbols(CldPtr cld) {
-  assert(cld != NULL, "invariant");
-  assert(!cld->is_unsafe_anonymous(), "invariant");
-  int count = 0;
-  // class loader type
-  const Klass* class_loader_klass = cld->class_loader_klass();
-  if (class_loader_klass == NULL) {
-    // (primordial) boot class loader
-    CStringEntryPtr entry = this->_artifacts->map_cstring(0);
-    assert(entry != NULL, "invariant");
-    assert(strncmp(entry->literal(),
-      BOOTSTRAP_LOADER_NAME,
-      BOOTSTRAP_LOADER_NAME_LEN) == 0, "invariant");
-    if (_unique_predicate(entry->id())) {
-      count += write__artifact__cstring__entry__(this->_writer, entry);
-    }
-  } else {
-    const Symbol* class_loader_name = cld->name();
-    if (class_loader_name != NULL) {
-      SymbolEntryPtr entry = this->_artifacts->map_symbol(class_loader_name);
-      assert(entry != NULL, "invariant");
-      if (_unique_predicate(entry->id())) {
-        count += write__artifact__symbol__entry__(this->_writer, entry);
-      }
-    }
+typedef SerializePredicate<KlassPtr> KlassPredicate;
+typedef JfrPredicatedTypeWriterImplHost<KlassPtr, KlassPredicate, write__klass> KlassWriterImpl;
+typedef JfrTypeWriterHost<KlassWriterImpl, TYPE_CLASS> KlassWriter;
+typedef CompositeFunctor<KlassPtr, KlassWriter, KlassArtifactRegistrator> KlassWriterRegistration;
+typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
+
+static bool write_klasses() {
+  assert(!_artifacts->has_klass_entries(), "invariant");
+  assert(_writer != NULL, "invariant");
+  KlassArtifactRegistrator reg(_artifacts);
+  KlassWriter kw(_writer, _class_unload);
+  KlassWriterRegistration kwr(&kw, &reg);
+  KlassCallback callback(&kwr);
+  _subsystem_callback = &callback;
+  do_klasses();
+  if (is_complete()) {
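+    // no tagged klasses were found; signal the caller that there is nothing more to write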
+    return false;
   }
-  return count;
-}
-
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::method_symbols(KlassPtr klass) {
-  assert(_predicate(klass), "invariant");
-  assert(_method_used_predicate(klass), "invariant");
-  assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
-  int count = 0;
-  const InstanceKlass* const ik = InstanceKlass::cast(klass);
-  const int len = ik->methods()->length();
-  for (int i = 0; i < len; ++i) {
-    MethodPtr method = ik->methods()->at(i);
-    if (_method_flag_predicate(method)) {
-      SymbolEntryPtr entry = this->_artifacts->map_symbol(method->name());
-      assert(entry != NULL, "invariant");
-      if (_unique_predicate(entry->id())) {
-        count += write__artifact__symbol__entry__(this->_writer, entry);
-      }
-      entry = this->_artifacts->map_symbol(method->signature());
-      assert(entry != NULL, "invariant");
-      if (_unique_predicate(entry->id())) {
-        count += write__artifact__symbol__entry__(this->_writer, entry);
-      }
-    }
-  }
-  return count;
+  _artifacts->tally(kw);
+  return true;
 }
 
-typedef KlassSymbolWriterImpl<LeakPredicate> LeakKlassSymbolWriterImpl;
-typedef JfrArtifactWriterHost<LeakKlassSymbolWriterImpl, TYPE_SYMBOL> LeakKlassSymbolWriter;
-
-class ClearKlassAndMethods {
- private:
-  ClearArtifact<KlassPtr> _clear_klass_tag_bits;
-  ClearArtifact<MethodPtr> _clear_method_flag;
-  MethodUsedPredicate<false> _method_used_predicate;
-
- public:
-  ClearKlassAndMethods(bool current_epoch) : _method_used_predicate(current_epoch) {}
-  bool operator()(KlassPtr klass) {
-    if (_method_used_predicate(klass)) {
-      const InstanceKlass* ik = InstanceKlass::cast(klass);
-      const int len = ik->methods()->length();
-      for (int i = 0; i < len; ++i) {
-        MethodPtr method = ik->methods()->at(i);
-        _clear_method_flag(method);
-      }
-    }
-    _clear_klass_tag_bits(klass);
-    return true;
-  }
-};
-
-typedef CompositeFunctor<KlassPtr,
-                         TagLeakpKlassArtifact,
-                         LeakKlassWriter> LeakpKlassArtifactTagging;
-
-typedef CompositeFunctor<KlassPtr,
-                         LeakpKlassArtifactTagging,
-                         KlassWriter> CompositeKlassWriter;
-
-typedef CompositeFunctor<KlassPtr,
-                         CompositeKlassWriter,
-                         KlassArtifactRegistrator> CompositeKlassWriterRegistration;
-
-typedef CompositeFunctor<KlassPtr,
-                         KlassWriter,
-                         KlassArtifactRegistrator> KlassWriterRegistration;
-
-typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
-typedef JfrArtifactCallbackHost<KlassPtr, CompositeKlassWriterRegistration> CompositeKlassCallback;
-
-/*
- * Composite operation
- *
- * TagLeakpKlassArtifact ->
- *   LeakpPredicate ->
- *     LeakpKlassWriter ->
- *       KlassPredicate ->
- *         KlassWriter ->
- *           KlassWriterRegistration
- */
-void JfrTypeSet::write_klass_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(!_artifacts->has_klass_entries(), "invariant");
-  KlassArtifactRegistrator reg(_artifacts);
-  KlassWriter kw(writer, _artifacts, current_epoch());
-  KlassWriterRegistration kwr(&kw, &reg);
-  if (leakp_writer == NULL) {
-    KlassCallback callback(&kwr);
-    _subsystem_callback = &callback;
-    do_klasses();
-    _artifacts->tally(kw);
+template <typename T>
+static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
+  assert(callback != NULL, "invariant");
+  assert(value != NULL, "invariant");
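+  // visit artifacts tagged in the previous epoch; for the rest, clear any stale SERIALIZED bit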
+  if (USED_PREV_EPOCH(value)) {
+    callback->do_artifact(value);
+    assert(IS_NOT_SERIALIZED(value), "invariant");
     return;
   }
-  TagLeakpKlassArtifact tagging(current_epoch());
-  LeakKlassWriter lkw(leakp_writer, _artifacts, current_epoch());
-  LeakpKlassArtifactTagging lpkat(&tagging, &lkw);
-  CompositeKlassWriter ckw(&lpkat, &kw);
-  CompositeKlassWriterRegistration ckwr(&ckw, &reg);
-  CompositeKlassCallback callback(&ckwr);
-  _subsystem_callback = &callback;
-  do_klasses();
+  if (IS_SERIALIZED(value)) {
+    CLEAR_SERIALIZED(value);
+  }
+  assert(IS_NOT_SERIALIZED(value), "invariant");
 }
 
-typedef CompositeFunctor<PkgPtr,
-                         PackageWriter,
-                         ClearArtifact<PkgPtr> > PackageWriterWithClear;
+int write__package(JfrCheckpointWriter* writer, const void* p) {
+  assert(writer != NULL, "invariant");
+  assert(_artifacts != NULL, "invariant");
+  assert(p != NULL, "invariant");
+  PkgPtr pkg = (PkgPtr)p;
+  writer->write(artifact_id(pkg));
+  writer->write(mark_symbol(pkg->name()));
+  writer->write(module_id(pkg));
+  writer->write((bool)pkg->is_exported());
+  set_serialized(pkg);
+  return 1;
+}
 
-typedef CompositeFunctor<PkgPtr,
-                         PackageWriter,
-                         UnTagArtifact<PkgPtr> > PackageWriterWithUnTag;
-typedef CompositeFunctor<PkgPtr,
-                         LeakPackageWriter,
-                         PackageWriter> CompositePackageWriter;
+static void do_package(PackageEntry* entry) {
+  do_previous_epoch_artifact(_subsystem_callback, entry);
+}
 
-typedef CompositeFunctor<PkgPtr,
-                         CompositePackageWriter,
-                         ClearArtifact<PkgPtr> > CompositePackageWriterWithClear;
-typedef CompositeFunctor<PkgPtr,
-                         CompositePackageWriter,
-                         UnTagArtifact<PkgPtr> > CompositePackageWriterWithUnTag;
+static void do_packages() {
+  ClassLoaderDataGraph::packages_do(&do_package);
+}
 
 class PackageFieldSelector {
  public:
@@ -684,96 +286,48 @@
   }
 };
 
-typedef KlassToFieldEnvelope<PackageFieldSelector,
-                             PackageWriterWithClear> KlassPackageWriterWithClear;
-
-typedef KlassToFieldEnvelope<PackageFieldSelector,
-                             PackageWriterWithUnTag> KlassPackageWriterWithUnTag;
+typedef SerializePredicate<PkgPtr> PackagePredicate;
+typedef JfrPredicatedTypeWriterImplHost<PkgPtr, PackagePredicate, write__package> PackageWriterImpl;
+typedef JfrTypeWriterHost<PackageWriterImpl, TYPE_PACKAGE> PackageWriter;
+typedef CompositeFunctor<PkgPtr, PackageWriter, ClearArtifact<PkgPtr> > PackageWriterWithClear;
 typedef KlassToFieldEnvelope<PackageFieldSelector, PackageWriter> KlassPackageWriter;
-typedef KlassToFieldEnvelope<PackageFieldSelector, CompositePackageWriter> KlassCompositePackageWriter;
-typedef KlassToFieldEnvelope<PackageFieldSelector,
-                             CompositePackageWriterWithClear> KlassCompositePackageWriterWithClear;
-
-typedef KlassToFieldEnvelope<PackageFieldSelector,
-                             CompositePackageWriterWithUnTag> KlassCompositePackageWriterWithUnTag;
 typedef JfrArtifactCallbackHost<PkgPtr, PackageWriterWithClear> PackageCallback;
-typedef JfrArtifactCallbackHost<PkgPtr, CompositePackageWriterWithClear> CompositePackageCallback;
 
-static void write_package_constants_current_epoch(JfrArtifactSet* artifacts, JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(artifacts != NULL, "invariant");
-  assert(artifacts->has_klass_entries(), "invariant");
-  PackageWriter pw(writer, artifacts, true);
-  if (leakp_writer == NULL) {
-    KlassPackageWriter kpw(&pw);
-    artifacts->iterate_klasses(kpw);
-    artifacts->tally(pw);
-  } else {
-    LeakPackageWriter lpw(leakp_writer, artifacts, true);
-    CompositePackageWriter cpw(&lpw, &pw);
-    KlassCompositePackageWriter kcpw(&cpw);
-    artifacts->iterate_klasses(kcpw);
-  }
-}
-
-/*
- * Composite operation
- *
- * LeakpPackageWriter ->
- *   PackageWriter ->
- *     ClearArtifact<PackageEntry>
- *
- */
-void JfrTypeSet::write_package_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(_artifacts->has_klass_entries(), "invariant");
-  if (current_epoch()) {
-    write_package_constants_current_epoch(_artifacts, writer, leakp_writer);
-    return;
-  }
-  assert(is_rotating(), "invariant");
-  PackageWriter pw(writer, _artifacts, false);
-  ClearArtifact<PkgPtr> clear;
-  UnTagArtifact<PkgPtr> untag;
-  if (leakp_writer == NULL) {
-    PackageWriterWithUnTag kpw(&pw, &untag);
-    KlassPackageWriterWithUnTag kpwwut(&kpw);
-    _artifacts->iterate_klasses(kpwwut);
+static void write_packages() {
+  assert(_writer != NULL, "invariant");
+  PackageWriter pw(_writer, _class_unload);
+  KlassPackageWriter kpw(&pw);
+  _artifacts->iterate_klasses(kpw);
+  if (previous_epoch()) {
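+    // rotation: sweep all packages, writing any still tagged and clearing their tag state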
+    ClearArtifact<PkgPtr> clear;
     PackageWriterWithClear pwwc(&pw, &clear);
     PackageCallback callback(&pwwc);
     _subsystem_callback = &callback;
     do_packages();
-    return;
   }
-  LeakPackageWriter lpw(leakp_writer, _artifacts, false);
-  CompositePackageWriter cpw(&lpw, &pw);
-  CompositePackageWriterWithUnTag cpwwut(&cpw, &untag);
-  KlassCompositePackageWriterWithUnTag kcpw(&cpwwut);
-  _artifacts->iterate_klasses(kcpw);
-  CompositePackageWriterWithClear cpwwc(&cpw, &clear);
-  CompositePackageCallback callback(&cpwwc);
-  _subsystem_callback = &callback;
-  do_packages();
+  _artifacts->tally(pw);
 }
 
-typedef CompositeFunctor<ModPtr,
-                         ModuleWriter,
-                         ClearArtifact<ModPtr> > ModuleWriterWithClear;
+int write__module(JfrCheckpointWriter* writer, const void* m) {
+  assert(m != NULL, "invariant");
+  assert(_artifacts != NULL, "invariant");
+  ModPtr mod = (ModPtr)m;
+  writer->write(artifact_id(mod));
+  writer->write(mark_symbol(mod->name()));
+  writer->write(mark_symbol(mod->version()));
+  writer->write(mark_symbol(mod->location()));
+  writer->write(cld_id(mod->loader_data()));
+  set_serialized(mod);
+  return 1;
+}
 
-typedef CompositeFunctor<ModPtr,
-                         ModuleWriter,
-                         UnTagArtifact<ModPtr> > ModuleWriterWithUnTag;
-typedef CompositeFunctor<ModPtr,
-                         LeakModuleWriter,
-                         ModuleWriter> CompositeModuleWriter;
+static void do_module(ModuleEntry* entry) {
+  do_previous_epoch_artifact(_subsystem_callback, entry);
+}
 
-typedef CompositeFunctor<ModPtr,
-                         CompositeModuleWriter,
-                         ClearArtifact<ModPtr> > CompositeModuleWriterWithClear;
-typedef CompositeFunctor<ModPtr,
-                         CompositeModuleWriter,
-                         UnTagArtifact<ModPtr> > CompositeModuleWriterWithUnTag;
-
-typedef JfrArtifactCallbackHost<ModPtr, ModuleWriterWithClear> ModuleCallback;
-typedef JfrArtifactCallbackHost<ModPtr, CompositeModuleWriterWithClear> CompositeModuleCallback;
+static void do_modules() {
+  ClassLoaderDataGraph::modules_do(&do_module);
+}
 
 class ModuleFieldSelector {
  public:
@@ -785,80 +339,51 @@
   }
 };
 
-typedef KlassToFieldEnvelope<ModuleFieldSelector,
-                             ModuleWriterWithClear> KlassModuleWriterWithClear;
-
-typedef KlassToFieldEnvelope<ModuleFieldSelector,
-                             ModuleWriterWithUnTag> KlassModuleWriterWithUnTag;
+typedef SerializePredicate<ModPtr> ModulePredicate;
+typedef JfrPredicatedTypeWriterImplHost<ModPtr, ModulePredicate, write__module> ModuleWriterImpl;
+typedef JfrTypeWriterHost<ModuleWriterImpl, TYPE_MODULE> ModuleWriter;
+typedef CompositeFunctor<ModPtr, ModuleWriter, ClearArtifact<ModPtr> > ModuleWriterWithClear;
+typedef JfrArtifactCallbackHost<ModPtr, ModuleWriterWithClear> ModuleCallback;
 typedef KlassToFieldEnvelope<ModuleFieldSelector, ModuleWriter> KlassModuleWriter;
-typedef KlassToFieldEnvelope<ModuleFieldSelector,  CompositeModuleWriter> KlassCompositeModuleWriter;
-typedef KlassToFieldEnvelope<ModuleFieldSelector,
-                             CompositeModuleWriterWithClear> KlassCompositeModuleWriterWithClear;
-
-typedef KlassToFieldEnvelope<ModuleFieldSelector,
-                             CompositeModuleWriterWithUnTag> KlassCompositeModuleWriterWithUnTag;
 
-static void write_module_constants_current_epoch(JfrArtifactSet* artifacts, JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(artifacts != NULL, "invariant");
-  assert(artifacts->has_klass_entries(), "invariant");
-  ModuleWriter mw(writer, artifacts, true);
-  if (leakp_writer == NULL) {
-    KlassModuleWriter kmw(&mw);
-    artifacts->iterate_klasses(kmw);
-    artifacts->tally(mw);
-  } else {
-    LeakModuleWriter lmw(leakp_writer, artifacts, true);
-    CompositeModuleWriter cmw(&lmw, &mw);
-    KlassCompositeModuleWriter kcmw(&cmw);
-    artifacts->iterate_klasses(kcmw);
-  }
-}
-
-/*
- * Composite operation
- *
- * LeakpModuleWriter ->
- *   ModuleWriter ->
- *     ClearArtifact<ModuleEntry>
- */
-void JfrTypeSet::write_module_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(_artifacts->has_klass_entries(), "invariant");
-  if (current_epoch()) {
-    write_module_constants_current_epoch(_artifacts, writer, leakp_writer);
-    return;
-  }
-  assert(is_rotating(), "invariant");
-  ClearArtifact<ModPtr> clear;
-  UnTagArtifact<ModPtr> untag;
-  ModuleWriter mw(writer, _artifacts, false);
-  if (leakp_writer == NULL) {
-    ModuleWriterWithUnTag kpw(&mw, &untag);
-    KlassModuleWriterWithUnTag kmwwut(&kpw);
-    _artifacts->iterate_klasses(kmwwut);
+static void write_modules() {
+  assert(_writer != NULL, "invariant");
+  ModuleWriter mw(_writer, _class_unload);
+  KlassModuleWriter kmw(&mw);
+  _artifacts->iterate_klasses(kmw);
+  if (previous_epoch()) {
+    ClearArtifact<ModPtr> clear;
     ModuleWriterWithClear mwwc(&mw, &clear);
     ModuleCallback callback(&mwwc);
     _subsystem_callback = &callback;
     do_modules();
-    return;
   }
-  LeakModuleWriter lmw(leakp_writer, _artifacts, false);
-  CompositeModuleWriter cmw(&lmw, &mw);
-  CompositeModuleWriterWithUnTag cmwwut(&cmw, &untag);
-  KlassCompositeModuleWriterWithUnTag kcmw(&cmwwut);
-  _artifacts->iterate_klasses(kcmw);
-  CompositeModuleWriterWithClear cmwwc(&cmw, &clear);
-  CompositeModuleCallback callback(&cmwwc);
-  _subsystem_callback = &callback;
-  do_modules();
+  _artifacts->tally(mw);
 }
 
-typedef CompositeFunctor<CldPtr, CldWriter, ClearArtifact<CldPtr> > CldWriterWithClear;
-typedef CompositeFunctor<CldPtr, CldWriter, UnTagArtifact<CldPtr> > CldWriterWithUnTag;
-typedef CompositeFunctor<CldPtr, LeakCldWriter, CldWriter> CompositeCldWriter;
-typedef CompositeFunctor<CldPtr, CompositeCldWriter, ClearArtifact<CldPtr> > CompositeCldWriterWithClear;
-typedef CompositeFunctor<CldPtr, CompositeCldWriter, UnTagArtifact<CldPtr> > CompositeCldWriterWithUnTag;
-typedef JfrArtifactCallbackHost<CldPtr, CldWriterWithClear> CldCallback;
-typedef JfrArtifactCallbackHost<CldPtr, CompositeCldWriterWithClear> CompositeCldCallback;
+int write__classloader(JfrCheckpointWriter* writer, const void* c) {
+  assert(c != NULL, "invariant");
+  CldPtr cld = (CldPtr)c;
+  assert(!cld->is_unsafe_anonymous(), "invariant");
+  // class loader type
+  const Klass* class_loader_klass = cld->class_loader_klass();
+  if (class_loader_klass == NULL) {
+    // (primordial) boot class loader
+    writer->write(artifact_id(cld)); // class loader instance id
+    writer->write((traceid)0); // class loader type id (none for the boot loader)
+    writer->write(create_symbol_id(1)); // 1 maps to synthetic name -> "bootstrap"
+  } else {
+    writer->write(artifact_id(cld)); // class loader instance id
+    writer->write(artifact_id(class_loader_klass)); // class loader type id
+    writer->write(mark_symbol(cld->name())); // class loader instance name
+  }
+  set_serialized(cld);
+  return 1;
+}
+
+static void do_class_loader_data(ClassLoaderData* cld) {
+  do_previous_epoch_artifact(_subsystem_callback, cld);
+}
 
 class CldFieldSelector {
  public:
@@ -870,350 +395,232 @@
   }
 };
 
-typedef KlassToFieldEnvelope<CldFieldSelector, CldWriter> KlassCldWriter;
-typedef KlassToFieldEnvelope<CldFieldSelector, CldWriterWithClear> KlassCldWriterWithClear;
-typedef KlassToFieldEnvelope<CldFieldSelector, CldWriterWithUnTag> KlassCldWriterWithUnTag;
-typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriter> KlassCompositeCldWriter;
-typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriterWithClear> KlassCompositeCldWriterWithClear;
-typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriterWithUnTag> KlassCompositeCldWriterWithUnTag;
+class CLDCallback : public CLDClosure {
+ public:
+  CLDCallback() {}
+  void do_cld(ClassLoaderData* cld) {
+    assert(cld != NULL, "invariant");
+    if (cld->is_unsafe_anonymous()) {
+      return;
+    }
+    do_class_loader_data(cld);
+  }
+};
 
-static void write_class_loader_constants_current_epoch(JfrArtifactSet* artifacts, JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(artifacts != NULL, "invariant");
-  assert(artifacts->has_klass_entries(), "invariant");
-  CldWriter cldw(writer, artifacts, true);
-  if (leakp_writer == NULL) {
-    KlassCldWriter kcw(&cldw);
-    artifacts->iterate_klasses(kcw);
-    artifacts->tally(cldw);
-  } else {
-    LeakCldWriter lcldw(leakp_writer, artifacts, true);
-    CompositeCldWriter ccldw(&lcldw, &cldw);
-    KlassCompositeCldWriter kccldw(&ccldw);
-    artifacts->iterate_klasses(kccldw);
-  }
+static void do_class_loaders() {
+  CLDCallback cld_cb;
+  ClassLoaderDataGraph::loaded_cld_do(&cld_cb);
 }
 
-/*
- * Composite operation
- *
- * LeakpClassLoaderWriter ->
- *   ClassLoaderWriter ->
- *     ClearArtifact<ClassLoaderData>
- */
-void JfrTypeSet::write_class_loader_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(_artifacts->has_klass_entries(), "invariant");
-  if (current_epoch()) {
-    write_class_loader_constants_current_epoch(_artifacts, writer, leakp_writer);
-    return;
-  }
-  assert(is_rotating(), "invariant");
-  ClearArtifact<CldPtr> clear;
-  UnTagArtifact<CldPtr> untag;
-  CldWriter cldw(writer, _artifacts, false);
-  if (leakp_writer == NULL) {
-    CldWriterWithUnTag cldwut(&cldw, &untag);
-    KlassCldWriterWithUnTag kcldwut(&cldwut);
-    _artifacts->iterate_klasses(kcldwut);
+typedef SerializePredicate<CldPtr> CldPredicate;
+typedef JfrPredicatedTypeWriterImplHost<CldPtr, CldPredicate, write__classloader> CldWriterImpl;
+typedef JfrTypeWriterHost<CldWriterImpl, TYPE_CLASSLOADER> CldWriter;
+typedef CompositeFunctor<CldPtr, CldWriter, ClearArtifact<CldPtr> > CldWriterWithClear;
+typedef JfrArtifactCallbackHost<CldPtr, CldWriterWithClear> CldCallback;
+typedef KlassToFieldEnvelope<CldFieldSelector, CldWriter> KlassCldWriter;
+
+static void write_classloaders() {
+  assert(_writer != NULL, "invariant");
+  CldWriter cldw(_writer, _class_unload);
+  KlassCldWriter kcw(&cldw);
+  _artifacts->iterate_klasses(kcw);
+  if (previous_epoch()) {
+    ClearArtifact<CldPtr> clear;
     CldWriterWithClear cldwwc(&cldw, &clear);
     CldCallback callback(&cldwwc);
     _subsystem_callback = &callback;
     do_class_loaders();
-    return;
   }
-  LeakCldWriter lcldw(leakp_writer, _artifacts, false);
-  CompositeCldWriter ccldw(&lcldw, &cldw);
-  CompositeCldWriterWithUnTag cldwwut(&ccldw, &untag);
-  KlassCompositeCldWriterWithUnTag kccldw(&cldwwut);
-  _artifacts->iterate_klasses(kccldw);
-  CompositeCldWriterWithClear ccldwwc(&ccldw, &clear);
-  CompositeCldCallback callback(&ccldwwc);
-  _subsystem_callback = &callback;
-  do_class_loaders();
+  _artifacts->tally(cldw);
+}
+
+static u1 get_visibility(MethodPtr method) {
+  assert(method != NULL, "invariant");
+  return const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0;
+}
+
+template <>
+void set_serialized<Method>(MethodPtr method) {
+  assert(method != NULL, "invariant");
+  SET_METHOD_SERIALIZED(method);
+  assert(IS_METHOD_SERIALIZED(method), "invariant");
 }
 
-template <bool predicate_bool, typename MethodFunctor>
+int write__method(JfrCheckpointWriter* writer, const void* m) {
+  assert(writer != NULL, "invariant");
+  assert(_artifacts != NULL, "invariant");
+  assert(m != NULL, "invariant");
+  MethodPtr method = (MethodPtr)m;
+  KlassPtr klass = method->method_holder();
+  assert(klass != NULL, "invariant");
+  assert(METHOD_USED_ANY_EPOCH(klass), "invariant");
+  writer->write(method_id(klass, method));
+  writer->write(artifact_id(klass));
+  writer->write(mark_symbol(method->name()));
+  writer->write(mark_symbol(method->signature()));
+  writer->write((u2)get_flags(method));
+  writer->write(get_visibility(method));
+  set_serialized(method);
+  return 1;
+}
+
+template <typename MethodCallback, typename KlassCallback>
 class MethodIteratorHost {
  private:
-  MethodFunctor _method_functor;
-  MethodUsedPredicate<predicate_bool> _method_used_predicate;
+  MethodCallback _method_cb;
+  KlassCallback _klass_cb;
+  MethodUsedPredicate _method_used_predicate;
   MethodFlagPredicate _method_flag_predicate;
-
  public:
   MethodIteratorHost(JfrCheckpointWriter* writer,
-                     JfrArtifactSet* artifacts,
-                     bool current_epoch,
+                     bool current_epoch = false,
+                     bool class_unload = false,
                      bool skip_header = false) :
-    _method_functor(writer, artifacts, current_epoch, skip_header),
+    _method_cb(writer, class_unload, skip_header),
+    _klass_cb(writer, class_unload, skip_header),
     _method_used_predicate(current_epoch),
     _method_flag_predicate(current_epoch) {}
 
   bool operator()(KlassPtr klass) {
     if (_method_used_predicate(klass)) {
       assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
-      const InstanceKlass* ik = InstanceKlass::cast(klass);
+      const InstanceKlass* const ik = InstanceKlass::cast(klass);
       const int len = ik->methods()->length();
       for (int i = 0; i < len; ++i) {
         MethodPtr method = ik->methods()->at(i);
         if (_method_flag_predicate(method)) {
-          _method_functor(method);
+          _method_cb(method);
         }
       }
     }
-    return true;
+    return _klass_cb(klass);
   }
 
-  int count() const { return _method_functor.count(); }
-  void add(int count) { _method_functor.add(count); }
+  int count() const { return _method_cb.count(); }
+  void add(int count) { _method_cb.add(count); }
 };
 
-typedef MethodIteratorHost<true /*leakp */,  LeakpMethodWriterImpl> LeakMethodWriter;
-typedef MethodIteratorHost<false, MethodWriterImpl> MethodWriter;
-typedef CompositeFunctor<KlassPtr, LeakMethodWriter, MethodWriter> CompositeMethodWriter;
-
-/*
- * Composite operation
- *
- * LeakpMethodWriter ->
- *   MethodWriter
- */
-void JfrTypeSet::write_method_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(_artifacts->has_klass_entries(), "invariant");
-  MethodWriter mw(writer, _artifacts, is_not_rotating());
-  if (leakp_writer == NULL) {
-    _artifacts->iterate_klasses(mw);
-    _artifacts->tally(mw);
-    return;
-  }
-  LeakMethodWriter lpmw(leakp_writer, _artifacts, is_not_rotating());
-  CompositeMethodWriter cmw(&lpmw, &mw);
-  _artifacts->iterate_klasses(cmw);
-}
-
-static void write_symbols_leakp(JfrCheckpointWriter* leakp_writer, JfrArtifactSet* artifacts, bool current_epoch) {
-  assert(leakp_writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  LeakKlassSymbolWriter lpksw(leakp_writer, artifacts, current_epoch);
-  artifacts->iterate_klasses(lpksw);
-}
-
-static void write_symbols(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, JfrArtifactSet* artifacts, bool current_epoch) {
-  assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  if (leakp_writer != NULL) {
-    write_symbols_leakp(leakp_writer, artifacts, current_epoch);
-  }
-  // iterate all registered symbols
-  SymbolEntryWriter symbol_writer(writer, artifacts, current_epoch);
-  artifacts->iterate_symbols(symbol_writer);
-  CStringEntryWriter cstring_writer(writer, artifacts, current_epoch, true); // skip header
-  artifacts->iterate_cstrings(cstring_writer);
-  symbol_writer.add(cstring_writer.count());
-  artifacts->tally(symbol_writer);
-}
-
-bool JfrTypeSet::_class_unload = false;
-bool JfrTypeSet::_flushpoint = false;
-JfrArtifactSet* JfrTypeSet::_artifacts = NULL;
-JfrArtifactClosure* JfrTypeSet::_subsystem_callback = NULL;
-
-bool JfrTypeSet::is_rotating() {
-  return !(_class_unload || _flushpoint);
-}
-
-bool JfrTypeSet::is_not_rotating() {
-  return !is_rotating();
-}
-
-bool JfrTypeSet::current_epoch() {
-  return is_not_rotating();
-}
-
-void JfrTypeSet::write_symbol_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(writer != NULL, "invariant");
-  assert(_artifacts->has_klass_entries(), "invariant");
-  write_symbols(writer, leakp_writer, _artifacts, _class_unload);
-}
-
-void JfrTypeSet::do_unloaded_klass(Klass* klass) {
-  assert(klass != NULL, "invariant");
-  assert(_subsystem_callback != NULL, "invariant");
-  if (IS_JDK_JFR_EVENT_SUBKLASS(klass)) {
-    JfrEventClasses::increment_unloaded_event_class();
-  }
-  if (USED_THIS_EPOCH(klass)) { // includes leakp subset
-    _subsystem_callback->do_artifact(klass);
-    return;
-  }
-  if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
-    SET_LEAKP_USED_THIS_EPOCH(klass); // tag leakp "safe byte" for subset inclusion
-    _subsystem_callback->do_artifact(klass);
-  }
-}
-
-void JfrTypeSet::do_klass(Klass* klass) {
-  assert(klass != NULL, "invariant");
-  assert(_subsystem_callback != NULL, "invariant");
-  if (_flushpoint) {
-    if (USED_THIS_EPOCH(klass)) {
-      _subsystem_callback->do_artifact(klass);
-      return;
-    }
-  } else {
-    if (USED_PREV_EPOCH(klass)) { // includes leakp subset
-      _subsystem_callback->do_artifact(klass);
-      return;
-    }
-  }
-  if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
-    if (_flushpoint) {
-      SET_LEAKP_USED_THIS_EPOCH(klass);
-    } else {
-      SET_LEAKP_USED_PREV_EPOCH(klass); // tag leakp "safe byte" for subset inclusion
-    }
-    _subsystem_callback->do_artifact(klass);
-  }
-}
-
-void JfrTypeSet::do_klasses() {
-  if (_class_unload) {
-    ClassLoaderDataGraph::classes_unloading_do(&do_unloaded_klass);
-    return;
-  }
-  ClassLoaderDataGraph::classes_do(&do_klass);
-}
-
-template <typename T>
-static void do_current_epoch_artifact(JfrArtifactClosure* callback, T* value) {
-  assert(callback != NULL, "invariant");
-  assert(value != NULL, "invariant");
-  if (ANY_USED_THIS_EPOCH(value)) { // includes leakp subset
-    callback->do_artifact(value);
-  }
-}
-
-template <typename T>
-static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
-  assert(callback != NULL, "invariant");
-  assert(value != NULL, "invariant");
-  if (ANY_USED_PREV_EPOCH(value)) { // includes leakp subset
-    callback->do_artifact(value);
-    assert(IS_NOT_SERIALIZED(value), "invariant");
-    return;
-  }
-  if (IS_SERIALIZED(value)) {
-    UNSERIALIZE(value);
-  }
-  assert(IS_NOT_SERIALIZED(value), "invariant");
-}
-void JfrTypeSet::do_unloaded_package(PackageEntry* entry) {
-  do_current_epoch_artifact(_subsystem_callback, entry);
-}
-
-void JfrTypeSet::do_package(PackageEntry* entry) {
-  do_previous_epoch_artifact(_subsystem_callback, entry);
-}
-
-void JfrTypeSet::do_packages() {
-  if (_class_unload) {
-    ClassLoaderDataGraph::packages_unloading_do(&do_unloaded_package);
-    return;
-  }
-  ClassLoaderDataGraph::packages_do(&do_package);
-}
-
-void JfrTypeSet::do_unloaded_module(ModuleEntry* entry) {
-  do_current_epoch_artifact(_subsystem_callback, entry);
-}
-
-void JfrTypeSet::do_module(ModuleEntry* entry) {
-  do_previous_epoch_artifact(_subsystem_callback, entry);
-}
-
-void JfrTypeSet::do_modules() {
-  if (_class_unload) {
-    ClassLoaderDataGraph::modules_unloading_do(&do_unloaded_module);
-    return;
-  }
-  ClassLoaderDataGraph::modules_do(&do_module);
-}
-
-void JfrTypeSet::do_unloaded_class_loader_data(ClassLoaderData* cld) {
-  do_current_epoch_artifact(_subsystem_callback, cld);
-}
-
-void JfrTypeSet::do_class_loader_data(ClassLoaderData* cld) {
-  do_previous_epoch_artifact(_subsystem_callback, cld);
-}
-
-class CLDCallback : public CLDClosure {
- private:
-  bool _class_unload;
+template <typename T, template <typename> class Impl>
+class Wrapper {
+  Impl<T> _t;
  public:
-  CLDCallback(bool class_unload) : _class_unload(class_unload) {}
-  void do_cld(ClassLoaderData* cld) {
-    assert(cld != NULL, "invariant");
-    if (cld->is_unsafe_anonymous()) {
-      return;
-    }
-    if (_class_unload) {
-      JfrTypeSet::do_unloaded_class_loader_data(cld);
-      return;
-    }
-    JfrTypeSet::do_class_loader_data(cld);
+  Wrapper(JfrCheckpointWriter*, bool, bool) : _t() {}
+  bool operator()(T const& value) {
+    return _t(value);
   }
 };
 
-void JfrTypeSet::do_class_loaders() {
-  CLDCallback cld_cb(_class_unload);
-  if (_class_unload) {
-    ClassLoaderDataGraph::cld_unloading_do(&cld_cb);
-    return;
-  }
-  ClassLoaderDataGraph::loaded_cld_do(&cld_cb);
+typedef SerializePredicate<MethodPtr> MethodPredicate;
+typedef JfrPredicatedTypeWriterImplHost<MethodPtr, MethodPredicate, write__method> MethodWriterImplTarget;
+typedef JfrTypeWriterHost<MethodWriterImplTarget, TYPE_METHOD> MethodWriterImpl;
+typedef Wrapper<KlassPtr, Stub> KlassCallbackStub;
+typedef MethodIteratorHost<MethodWriterImpl, KlassCallbackStub> MethodWriter;
+
+static void write_methods() {
+  assert(_writer != NULL, "invariant");
+  MethodWriter mw(_writer, current_epoch(), _class_unload);
+  _artifacts->iterate_klasses(mw);
+  _artifacts->tally(mw);
+}
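
The extra KlassCallback template parameter pays off in how MethodIteratorHost gets instantiated: paired with a no-op klass functor (the Stub wrapper) it acts as the MethodWriter above, and paired with two clearing functors it becomes the ClearKlassAndMethods pass further down. A minimal, self-contained sketch of that shape, using plain stand-in types rather than HotSpot's:

#include <vector>

struct Method {};
struct Klass { std::vector<Method*> methods; };

struct WriteMethod { bool operator()(Method*) { /* serialize the method */ return true; } };
struct StubKlass   { bool operator()(Klass*)  { return true; } }; // no-op, like Wrapper<KlassPtr, Stub>

template <typename MethodCb, typename KlassCb>
struct MethodIteratorSketch {
  MethodCb _method_cb;
  KlassCb  _klass_cb;
  bool operator()(Klass* k) {
    for (Method* m : k->methods) {
      _method_cb(m);          // visit every method of the klass
    }
    return _klass_cb(k);      // then hand the klass itself to the second callback
  }
};

// MethodIteratorSketch<WriteMethod, StubKlass> mirrors MethodWriter above;
// substituting two clearing functors mirrors ClearKlassAndMethods below.
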
+
+template <>
+void set_serialized<JfrSymbolId::SymbolEntry>(SymbolEntryPtr ptr) {
+  assert(ptr != NULL, "invariant");
+  ptr->set_serialized();
+  assert(ptr->is_serialized(), "invariant");
+}
+
+template <>
+void set_serialized<JfrSymbolId::CStringEntry>(CStringEntryPtr ptr) {
+  assert(ptr != NULL, "invariant");
+  ptr->set_serialized();
+  assert(ptr->is_serialized(), "invariant");
+}
+
+int write__symbol(JfrCheckpointWriter* writer, const void* e) {
+  assert(writer != NULL, "invariant");
+  assert(e != NULL, "invariant");
+  ResourceMark rm;
+  SymbolEntryPtr entry = (SymbolEntryPtr)e;
+  writer->write(create_symbol_id(entry->id()));
+  writer->write(entry->value()->as_C_string());
+  set_serialized(entry);
+  return 1;
 }
 
-static void clear_artifacts(JfrArtifactSet* artifacts, bool current_epoch) {
-  assert(artifacts != NULL, "invariant");
-  assert(artifacts->has_klass_entries(), "invariant");
+int write__cstring(JfrCheckpointWriter* writer, const void* e) {
+  assert(writer != NULL, "invariant");
+  assert(e != NULL, "invariant");
+  CStringEntryPtr entry = (CStringEntryPtr)e;
+  writer->write(create_symbol_id(entry->id()));
+  writer->write(entry->value());
+  set_serialized(entry);
+  return 1;
+}
+
+typedef SymbolPredicate<SymbolEntryPtr> SymPredicate;
+typedef JfrPredicatedTypeWriterImplHost<SymbolEntryPtr, SymPredicate, write__symbol> SymbolEntryWriterImpl;
+typedef JfrTypeWriterHost<SymbolEntryWriterImpl, TYPE_SYMBOL> SymbolEntryWriter;
+typedef SymbolPredicate<CStringEntryPtr> CStringPredicate;
+typedef JfrPredicatedTypeWriterImplHost<CStringEntryPtr, CStringPredicate, write__cstring> CStringEntryWriterImpl;
+typedef JfrTypeWriterHost<CStringEntryWriterImpl, TYPE_SYMBOL> CStringEntryWriter;
+
+static void write_symbols() {
+  assert(_writer != NULL, "invariant");
+  SymbolEntryWriter symbol_writer(_writer, _class_unload);
+  _artifacts->iterate_symbols(symbol_writer);
+  CStringEntryWriter cstring_writer(_writer, _class_unload, true); // skip header
+  _artifacts->iterate_cstrings(cstring_writer);
+  symbol_writer.add(cstring_writer.count());
+  _artifacts->tally(symbol_writer);
+}
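
Note how both writers above target the same TYPE_SYMBOL constant pool: the CStringEntryWriter is constructed with skip_header = true, so only the SymbolEntryWriter emits the pool header and count slot, and the cstring count is folded in via add() before tally(). A hedged sketch of that count-merging shape (names hypothetical):

// One pool header, two producers: the second skips the header and
// surrenders its count to the first before the count slot is patched.
struct PoolWriterSketch {
  int  _count;
  bool _skip_header;
  explicit PoolWriterSketch(bool skip_header) : _count(0), _skip_header(skip_header) {
    if (!_skip_header) { /* write TYPE_SYMBOL id, reserve u4 count slot */ }
  }
  int  count() const { return _count; }
  void add(int n)    { _count += n; }
  ~PoolWriterSketch() {
    if (!_skip_header) { /* patch reserved slot with _count */ }
  }
};
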
 
-  // untag
-  ClearKlassAndMethods clear(current_epoch);
-  artifacts->iterate_klasses(clear);
+typedef Wrapper<KlassPtr, ClearArtifact> ClearKlassBits;
+typedef Wrapper<MethodPtr, ClearArtifact> ClearMethodFlag;
+typedef MethodIteratorHost<ClearMethodFlag, ClearKlassBits> ClearKlassAndMethods;
+
+static size_t teardown() {
+  assert(_artifacts != NULL, "invariant");
+  const size_t total_count = _artifacts->total_count();
+  if (previous_epoch()) {
+    assert(_writer != NULL, "invariant");
+    ClearKlassAndMethods clear(_writer);
+    _artifacts->iterate_klasses(clear);
+    _artifacts->clear();
+    ++checkpoint_id;
+  }
+  return total_count;
+}
+
+static void setup(JfrCheckpointWriter* writer, bool class_unload, bool flushpoint) {
+  _writer = writer;
+  _class_unload = class_unload;
+  _flushpoint = flushpoint;
+  if (_artifacts == NULL) {
+    _artifacts = new JfrArtifactSet(class_unload);
+  } else {
+    _artifacts->initialize(class_unload);
+  }
+  assert(_artifacts != NULL, "invariant");
+  assert(!_artifacts->has_klass_entries(), "invariant");
 }
 
 /**
  * Write all "tagged" (in-use) constant artifacts and their dependencies.
  */
-size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload, bool flushpoint) {
+size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, bool class_unload, bool flushpoint) {
   assert(writer != NULL, "invariant");
   ResourceMark rm;
-  // initialization begin
-  _class_unload = class_unload;
-  _flushpoint = flushpoint;
-  ++checkpoint_id;
-  if (_artifacts == NULL) {
-    _artifacts = new JfrArtifactSet(current_epoch());
-  } else {
-    _artifacts->initialize(current_epoch());
-  }
-  assert(_artifacts != NULL, "invariant");
-  assert(!_artifacts->has_klass_entries(), "invariant");
-  // initialization complete
-
+  setup(writer, class_unload, flushpoint);
   // write order is important because an individual write step
   // might tag an artifact to be written in a subsequent step
-  write_klass_constants(writer, leakp_writer);
-  if (!_artifacts->has_klass_entries()) {
+  if (!write_klasses()) {
     return 0;
   }
-  write_package_constants(writer, leakp_writer);
-  write_module_constants(writer, leakp_writer);
-  write_class_loader_constants(writer, leakp_writer);
-  write_method_constants(writer, leakp_writer);
-  write_symbol_constants(writer, leakp_writer);
-  const size_t total_count = _artifacts->total_count();
-  if (!flushpoint) {
-    clear_artifacts(_artifacts, class_unload);
-  }
-  return total_count;
+  write_packages();
+  write_modules();
+  write_classloaders();
+  write_methods();
+  write_symbols();
+  return teardown();
 }
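
With the leakp writer gone, the entry point collapses to a single writer plus two mode flags. A hedged usage sketch (caller name hypothetical; the clearing behavior follows from teardown() above, which only clears on a previous-epoch pass):

// Chunk rotation: previous-epoch artifacts are written and then cleared
// by teardown(); a flushpoint writes the current epoch without clearing.
static size_t write_type_set_sketch(JfrCheckpointWriter* writer, bool flushing) {
  return JfrTypeSet::serialize(writer,
                               /* class_unload */ false,
                               /* flushpoint   */ flushing);
}
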
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -27,51 +27,11 @@
 
 #include "jfr/utilities/jfrAllocation.hpp"
 
-class ClassLoaderData;
-class JfrArtifactClosure;
-class JfrArtifactSet;
 class JfrCheckpointWriter;
-class Klass;
-
-class ModuleEntry;
-class PackageEntry;
 
 class JfrTypeSet : AllStatic {
-  friend class CLDCallback;
-  friend class JfrTypeManager;
-  friend class TypeSetSerialization;
- private:
-  static JfrArtifactSet* _artifacts;
-  static JfrArtifactClosure* _subsystem_callback;
-  static bool _class_unload;
-  static bool _flushpoint;
-  static bool is_rotating();
-  static bool is_not_rotating();
-  static bool current_epoch();
-
-  static void do_klass(Klass* k);
-  static void do_unloaded_klass(Klass* k);
-  static void do_klasses();
-
-  static void do_package(PackageEntry* entry);
-  static void do_unloaded_package(PackageEntry* entry);
-  static void do_packages();
-
-  static void do_module(ModuleEntry* entry);
-  static void do_unloaded_module(ModuleEntry* entry);
-  static void do_modules();
-
-  static void do_class_loader_data(ClassLoaderData* cld);
-  static void do_unloaded_class_loader_data(ClassLoaderData* cld);
-  static void do_class_loaders();
-
-  static void write_klass_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_package_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_module_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_class_loader_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_method_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_symbol_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static size_t serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload, bool flushpoint);
+ public:
+  static size_t serialize(JfrCheckpointWriter* writer, bool class_unload, bool flushpoint);
 };
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESET_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,15 +28,21 @@
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 
-JfrSymbolId::JfrSymbolId() : _sym_table(new SymbolTable(this)), _cstring_table(new CStringTable(this)), _symbol_id_counter(0) {
+JfrSymbolId::JfrSymbolId() :
+  _sym_table(new SymbolTable(this)),
+  _cstring_table(new CStringTable(this)),
+  _sym_list(NULL),
+  _cstring_list(NULL),
+  _symbol_id_counter(0),
+  _class_unload(false) {
   assert(_sym_table != NULL, "invariant");
   assert(_cstring_table != NULL, "invariant");
-  initialize();
 }
 
-void JfrSymbolId::initialize() {
+JfrSymbolId::~JfrSymbolId() {
   clear();
-  assert(_symbol_id_counter == 0, "invariant");
+  delete _sym_table;
+  delete _cstring_table;
 }
 
 void JfrSymbolId::clear() {
@@ -51,12 +57,14 @@
     _cstring_table->clear_entries();
   }
   assert(!_cstring_table->has_entries(), "invariant");
+
+  _sym_list = NULL;
+  _cstring_list = NULL;
   _symbol_id_counter = 0;
 }
 
-JfrSymbolId::~JfrSymbolId() {
-  delete _sym_table;
-  delete _cstring_table;
+void JfrSymbolId::set_class_unload(bool class_unload) {
+  _class_unload = class_unload;
 }
 
 traceid JfrSymbolId::mark_unsafe_anonymous_klass_name(const Klass* k) {
@@ -79,7 +87,7 @@
 }
 
 const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(const Symbol* symbol) const {
-  return _sym_table->lookup_only(symbol, (uintptr_t)const_cast<Symbol*>(symbol)->identity_hash());
+  return _sym_table->lookup_only(symbol, (uintptr_t)symbol->identity_hash());
 }
 
 const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(uintptr_t hash) const {
@@ -90,10 +98,13 @@
   return _cstring_table->lookup_only(NULL, hash);
 }
 
-void JfrSymbolId::assign_id(SymbolEntry* entry) {
+void JfrSymbolId::assign_id(const SymbolEntry* entry) {
   assert(entry != NULL, "invariant");
+  const_cast<Symbol*>(entry->literal())->increment_refcount();
   assert(entry->id() == 0, "invariant");
   entry->set_id(++_symbol_id_counter);
+  entry->set_list_next(_sym_list);
+  _sym_list = entry;
 }
 
 bool JfrSymbolId::equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry) {
@@ -103,10 +114,17 @@
   return true;
 }
 
-void JfrSymbolId::assign_id(CStringEntry* entry) {
+void JfrSymbolId::unlink(const SymbolEntry* entry) {
+  assert(entry != NULL, "invariant");
+  const_cast<Symbol*>(entry->literal())->decrement_refcount();
+}
+
+void JfrSymbolId::assign_id(const CStringEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
   entry->set_id(++_symbol_id_counter);
+  entry->set_list_next(_cstring_list);
+  _cstring_list = entry;
 }
 
 bool JfrSymbolId::equals(const char* query, uintptr_t hash, const CStringEntry* entry) {
@@ -116,6 +134,13 @@
   return true;
 }
 
+void JfrSymbolId::unlink(const CStringEntry* entry) {
+  assert(entry != NULL, "invariant");
+  if (entry->id() != 1) {
+    FREE_C_HEAP_ARRAY(char, entry->literal());
+  }
+}
+
 traceid JfrSymbolId::mark(const Klass* k) {
   assert(k != NULL, "invariant");
   traceid symbol_id = 0;
@@ -123,7 +148,7 @@
     symbol_id = mark_unsafe_anonymous_klass_name(k);
   }
   if (0 == symbol_id) {
-    const Symbol* const sym = k->name();
+    Symbol* const sym = k->name();
     if (sym != NULL) {
       symbol_id = mark(sym);
     }
@@ -134,18 +159,26 @@
 
 traceid JfrSymbolId::mark(const Symbol* symbol) {
   assert(symbol != NULL, "invariant");
-  return mark(symbol, (uintptr_t)const_cast<Symbol*>(symbol)->identity_hash());
+  return mark(symbol, (uintptr_t)symbol->identity_hash());
 }
 
 traceid JfrSymbolId::mark(const Symbol* data, uintptr_t hash) {
   assert(data != NULL, "invariant");
   assert(_sym_table != NULL, "invariant");
-  return _sym_table->id(data, hash);
+  const SymbolEntry& entry = _sym_table->lookup_put(data, hash);
+  if (_class_unload) {
+    entry.set_unloading();
+  }
+  return entry.id();
 }
 
 traceid JfrSymbolId::mark(const char* str, uintptr_t hash) {
   assert(str != NULL, "invariant");
-  return _cstring_table->id(str, hash);
+  const CStringEntry& entry = _cstring_table->lookup_put(str, hash);
+  if (_class_unload) {
+    entry.set_unloading();
+  }
+  return entry.id();
 }
 
 bool JfrSymbolId::is_unsafe_anonymous_klass(const Klass* k) {
@@ -181,7 +214,8 @@
   sprintf(hash_buf, "/" UINTX_FORMAT, hashcode);
   const size_t hash_len = strlen(hash_buf);
   const size_t result_len = ik->name()->utf8_length();
-  anonymous_symbol = NEW_RESOURCE_ARRAY(char, result_len + hash_len + 1);
+  anonymous_symbol = NEW_C_HEAP_ARRAY(char, result_len + hash_len + 1, mtTracing);
+  assert(anonymous_symbol != NULL, "invariant");
   ik->name()->as_klass_external_name(anonymous_symbol, (int)result_len + 1);
   assert(strlen(anonymous_symbol) == result_len, "invariant");
   strcpy(anonymous_symbol + result_len, hash_buf);
@@ -196,32 +230,38 @@
   return (uintptr_t)const_cast<Symbol*>(sym)->identity_hash();
 }
 
-JfrArtifactSet::JfrArtifactSet(bool current_epoch) : _symbol_id(new JfrSymbolId()),
+static void preload_bootstrap_loader_name(JfrSymbolId* symbol_id) {
+  assert(symbol_id != NULL, "invariant");
+  assert(!symbol_id->has_entries(), "invariant");
+  symbol_id->mark((const char*)&BOOTSTRAP_LOADER_NAME, 0); // pre-load "bootstrap" into id 1
+}
+
+JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_id(new JfrSymbolId()),
                                                      _klass_list(NULL),
-                                                     _total_count(0),
-                                                     _current_epoch(current_epoch) {
-  initialize(current_epoch);
+                                                     _total_count(0) {
+  preload_bootstrap_loader_name(_symbol_id);
+  initialize(class_unload);
   assert(_klass_list != NULL, "invariant");
 }
 
 static const size_t initial_class_list_size = 200;
-void JfrArtifactSet::initialize(bool current_epoch) {
+
+void JfrArtifactSet::initialize(bool class_unload) {
   assert(_symbol_id != NULL, "invariant");
-  _symbol_id->initialize();
-  assert(!_symbol_id->has_entries(), "invariant");
-  _symbol_id->mark(BOOTSTRAP_LOADER_NAME, 0); // pre-load "bootstrap"
+  _symbol_id->set_class_unload(class_unload);
   _total_count = 0;
-  _current_epoch = current_epoch;
   // resource allocation
   _klass_list = new GrowableArray<const Klass*>(initial_class_list_size, false, mtTracing);
 }
 
 JfrArtifactSet::~JfrArtifactSet() {
   clear();
+  delete _symbol_id;
 }
 
 void JfrArtifactSet::clear() {
   _symbol_id->clear();
+  preload_bootstrap_loader_name(_symbol_id);
   // _klass_list will be cleared by a ResourceMark
 }
 
@@ -275,4 +315,3 @@
 size_t JfrArtifactSet::total_count() const {
   return _total_count;
 }
-
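
The jfrTypeSetUtils.cpp changes above replace hash-table iteration with intrusive lists: assign_id() now links each new entry onto _sym_list or _cstring_list, and iteration walks those lists instead of the table buckets. A self-contained sketch of the push and the next-before-visit traversal used by JfrSymbolId::iterate (stand-in types):

struct EntrySketch { EntrySketch* next = nullptr; };

struct SymbolListSketch {
  EntrySketch* head = nullptr;
  void assign(EntrySketch* e) {    // like assign_id: LIFO push onto the list head
    e->next = head;
    head = e;
  }
  template <typename Functor>
  void iterate(Functor& f) {
    for (EntrySketch* e = head; e != nullptr; ) {
      EntrySketch* next = e->next; // capture next first, as the real iterate() does
      f(e);
      e = next;
    }
  }
};
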
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -76,167 +76,73 @@
 };
 
 template <typename T>
-void tag_leakp_artifact(T const& value, bool current_epoch) {
-  assert(value != NULL, "invariant");
-  if (current_epoch) {
-    SET_LEAKP_USED_THIS_EPOCH(value);
-    assert(LEAKP_USED_THIS_EPOCH(value), "invariant");
-  } else {
-    SET_LEAKP_USED_PREV_EPOCH(value);
-    assert(LEAKP_USED_PREV_EPOCH(value), "invariant");
-  }
-}
-
-template <typename T>
-class LeakpClearArtifact {
-  bool _current_epoch;
- public:
-  LeakpClearArtifact(bool current_epoch) : _current_epoch(current_epoch) {}
-  bool operator()(T const& value) {
-    if (_current_epoch) {
-      if (LEAKP_USED_THIS_EPOCH(value)) {
-        LEAKP_UNUSE_THIS_EPOCH(value);
-      }
-    } else {
-      if (LEAKP_USED_PREV_EPOCH(value)) {
-        LEAKP_UNUSE_PREV_EPOCH(value);
-      }
-    }
-    return true;
-  }
-};
-
-template <typename T>
-class UnTagArtifact {
- public:
-  UnTagArtifact() {}
-  bool operator()(T const& value) {
-    if (LEAKP_USED_PREV_EPOCH(value)) {
-      LEAKP_UNUSE_PREV_EPOCH(value);
-    }
-    if (USED_PREV_EPOCH(value)) {
-      UNUSE_PREV_EPOCH(value);
-    }
-    return true;
-  }
-};
-
-template <typename T>
 class ClearArtifact {
  public:
   bool operator()(T const& value) {
-    if (LEAKP_USED_PREV_EPOCH(value)) {
-      LEAKP_UNUSE_PREV_EPOCH(value);
-    }
-    if (USED_PREV_EPOCH(value)) {
-      UNUSE_PREV_EPOCH(value);
-    }
-    if (IS_SERIALIZED(value)) {
-      UNSERIALIZE(value);
-    }
+    CLEAR_METHOD_AND_CLASS_PREV_EPOCH(value);
+    CLEAR_SERIALIZED(value);
     assert(IS_NOT_SERIALIZED(value), "invariant");
     return true;
   }
 };
 
 template <>
-class ClearArtifact<const Klass*> {
- public:
-  bool operator()(const Klass* klass) {
-    if (LEAKP_USED_PREV_EPOCH(klass)) {
-      LEAKP_UNUSE_PREV_EPOCH(klass);
-    }
-    if (USED_PREV_EPOCH(klass)) {
-      UNUSE_PREV_EPOCH(klass);
-    }
-    if (METHOD_USED_PREV_EPOCH(klass)) {
-      UNUSE_METHOD_PREV_EPOCH(klass);
-    }
-    if (IS_SERIALIZED(klass)) {
-      UNSERIALIZE(klass);
-    }
-    assert(IS_NOT_SERIALIZED(klass), "invariant");
-    return true;
-  }
-};
-
-template <>
 class ClearArtifact<const Method*> {
  public:
   bool operator()(const Method* method) {
-    if (METHOD_FLAG_USED_PREV_EPOCH(method)) {
-      CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
-    }
+    assert(METHOD_FLAG_USED_PREV_EPOCH(method), "invariant");
+    CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
+    CLEAR_METHOD_SERIALIZED(method);
+    assert(METHOD_NOT_SERIALIZED(method), "invariant");
     return true;
   }
 };
 
 template <typename T>
-class LeakPredicate {
-  bool _current_epoch;
- public:
-  LeakPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
-  bool operator()(T const& value) {
-    return _current_epoch ? LEAKP_USED_THIS_EPOCH(value) : LEAKP_USED_PREV_EPOCH(value);
-  }
-};
-
-template <typename T>
-class LeakSerializePredicate {
-  LeakPredicate<T> _leak_predicate;
+class Stub {
  public:
-  LeakSerializePredicate(bool current_epoch) : _leak_predicate(current_epoch) {}
-  bool operator()(T const& value) {
-    return IS_NOT_LEAKP_SERIALIZED(value) && _leak_predicate(value);
-  }
-};
-
-template <typename T>
-class UsedPredicate {
-  bool _current_epoch;
- public:
-  UsedPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
-  bool operator()(T const& value) {
-    return _current_epoch ? USED_THIS_EPOCH(value) : USED_PREV_EPOCH(value);
-  }
+  bool operator()(T const& value) { return true; }
 };
 
 template <typename T>
 class SerializePredicate {
-  bool _current_epoch;
+  bool _class_unload;
  public:
-  SerializePredicate(bool current_epoch) : _current_epoch(current_epoch) {}
+  SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
   bool operator()(T const& value) {
     assert(value != NULL, "invariant");
-    return IS_NOT_SERIALIZED(value);
+    return _class_unload ? true : IS_NOT_SERIALIZED(value);
   }
 };
 
 template <>
 class SerializePredicate<const Method*> {
-  bool _current_epoch;
-public:
-  SerializePredicate(bool current_epoch) : _current_epoch(current_epoch) {}
+  bool _class_unload;
+ public:
+  SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
   bool operator()(const Method* method) {
     assert(method != NULL, "invariant");
-    return METHOD_NOT_SERIALIZED(method);
+    return _class_unload ? true : METHOD_NOT_SERIALIZED(method);
   }
 };
 
-template <typename T, int compare(const T&, const T&)>
-class UniquePredicate {
- private:
-  GrowableArray<T> _seen;
+template <typename T>
+class SymbolPredicate {
+  bool _class_unload;
  public:
-  UniquePredicate(bool) : _seen() {}
+  SymbolPredicate(bool class_unload) : _class_unload(class_unload) {}
   bool operator()(T const& value) {
-    bool not_unique;
-    _seen.template find_sorted<T, compare>(value, not_unique);
-    if (not_unique) {
-      return false;
-    }
-    _seen.template insert_sorted<compare>(value);
-    return true;
+    assert(value != NULL, "invariant");
+    return _class_unload ? value->is_unloading() : !value->is_serialized();
+  }
+};
+
+class MethodUsedPredicate {
+  bool _current_epoch;
+public:
+  MethodUsedPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
+  bool operator()(const Klass* klass) {
+    return _current_epoch ? METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_PREV_EPOCH(klass);
   }
 };
 
@@ -249,38 +155,39 @@
   }
 };
 
-template <bool leakp>
-class MethodUsedPredicate {
-  bool _current_epoch;
- public:
-  MethodUsedPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
-  bool operator()(const Klass* klass) {
-    assert(ANY_USED(klass), "invariant");
-    if (_current_epoch) {
-      return leakp ? LEAKP_METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_THIS_EPOCH(klass);
-    }
-    return leakp ? LEAKP_METHOD_USED_PREV_EPOCH(klass) : METHOD_USED_PREV_EPOCH(klass);
-  }
-};
-
 class JfrSymbolId : public JfrCHeapObj {
   template <typename, typename, template<typename, typename> class, typename, size_t>
   friend class HashTableHost;
-  typedef HashTableHost<const Symbol*, traceid, Entry, JfrSymbolId> SymbolTable;
-  typedef HashTableHost<const char*, traceid, Entry, JfrSymbolId> CStringTable;
+  typedef HashTableHost<const Symbol*, traceid, ListEntry, JfrSymbolId> SymbolTable;
+  typedef HashTableHost<const char*, traceid, ListEntry, JfrSymbolId> CStringTable;
  public:
   typedef SymbolTable::HashEntry SymbolEntry;
   typedef CStringTable::HashEntry CStringEntry;
  private:
   SymbolTable* _sym_table;
   CStringTable* _cstring_table;
+  const SymbolEntry* _sym_list;
+  const CStringEntry* _cstring_list;
   traceid _symbol_id_counter;
+  bool _class_unload;
 
   // hashtable(s) callbacks
-  void assign_id(SymbolEntry* entry);
+  void assign_id(const SymbolEntry* entry);
   bool equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry);
-  void assign_id(CStringEntry* entry);
+  void unlink(const SymbolEntry* entry);
+  void assign_id(const CStringEntry* entry);
   bool equals(const char* query, uintptr_t hash, const CStringEntry* entry);
+  void unlink(const CStringEntry* entry);
+
+  template <typename Functor, typename T>
+  void iterate(Functor& functor, const T* list) {
+    const T* symbol = list;
+    while (symbol != NULL) {
+      const T* next = symbol->list_next();
+      functor(symbol);
+      symbol = next;
+    }
+  }
 
  public:
   static bool is_unsafe_anonymous_klass(const Klass* k);
@@ -291,8 +198,8 @@
   JfrSymbolId();
   ~JfrSymbolId();
 
-  void initialize();
   void clear();
+  void set_class_unload(bool class_unload);
 
   traceid mark_unsafe_anonymous_klass_name(const Klass* k);
   traceid mark(const Symbol* sym, uintptr_t hash);
@@ -304,42 +211,42 @@
   const SymbolEntry* map_symbol(uintptr_t hash) const;
   const CStringEntry* map_cstring(uintptr_t hash) const;
 
-  template <typename T>
-  void symbol(T& functor, const Klass* k) {
+  template <typename Functor>
+  void symbol(Functor& functor, const Klass* k) {
     if (is_unsafe_anonymous_klass(k)) {
       return;
     }
     functor(map_symbol(regular_klass_name_hash_code(k)));
   }
 
-  template <typename T>
-  void symbol(T& functor, const Method* method) {
+  template <typename Functor>
+  void symbol(Functor& functor, const Method* method) {
     assert(method != NULL, "invariant");
     functor(map_symbol((uintptr_t)method->name()->identity_hash()));
     functor(map_symbol((uintptr_t)method->signature()->identity_hash()));
   }
 
-  template <typename T>
-  void cstring(T& functor, const Klass* k) {
+  template <typename Functor>
+  void cstring(Functor& functor, const Klass* k) {
     if (!is_unsafe_anonymous_klass(k)) {
       return;
     }
     functor(map_cstring(unsafe_anonymous_klass_name_hash_code((const InstanceKlass*)k)));
   }
 
-  template <typename T>
-  void iterate_symbols(T& functor) {
-    _sym_table->iterate_entry(functor);
+  template <typename Functor>
+  void iterate_symbols(Functor& functor) {
+    iterate(functor, _sym_list);
   }
 
-  template <typename T>
-  void iterate_cstrings(T& functor) {
-    _cstring_table->iterate_entry(functor);
+  template <typename Functor>
+  void iterate_cstrings(Functor& functor) {
+    iterate(functor, _cstring_list);
   }
 
   bool has_entries() const { return has_symbol_entries() || has_cstring_entries(); }
-  bool has_symbol_entries() const { return _sym_table->has_entries(); }
-  bool has_cstring_entries() const { return _cstring_table->has_entries(); }
+  bool has_symbol_entries() const { return _sym_list != NULL; }
+  bool has_cstring_entries() const { return _cstring_list != NULL; }
 };
 
 /**
@@ -361,16 +268,16 @@
   JfrSymbolId* _symbol_id;
   GrowableArray<const Klass*>* _klass_list;
   size_t _total_count;
-  bool _current_epoch;
 
  public:
-  JfrArtifactSet(bool current_epoch);
+  JfrArtifactSet(bool class_unload);
   ~JfrArtifactSet();
 
   // caller needs ResourceMark
-  void initialize(bool current_epoch);
+  void initialize(bool class_unload);
   void clear();
 
+
   traceid mark(const Symbol* sym, uintptr_t hash);
   traceid mark(const Klass* klass);
   traceid mark(const Symbol* symbol);
@@ -382,7 +289,6 @@
   const JfrSymbolId::CStringEntry* map_cstring(uintptr_t hash) const;
 
   bool has_klass_entries() const;
-  bool current_epoch() const { return _current_epoch; }
   int entries() const;
   size_t total_count() const;
   void register_klass(const Klass* k);
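
The predicates above all follow one convention: during class unload everything (or every unloading entry) is admitted, otherwise only artifacts not yet serialized pass. The deleted jfrTypeSetWriter.hpp below shows the gating shape they plug into; a condensed sketch:

// Predicate-gated write: serialize only what the predicate admits,
// mirroring the shape of the (deleted) JfrPredicatedArtifactWriterImplHost.
template <typename T, typename Predicate, typename WriteOp>
struct PredicatedWriterSketch {
  Predicate _predicate;
  WriteOp   _op;
  explicit PredicatedWriterSketch(bool class_unload) : _predicate(class_unload), _op() {}
  int operator()(const T& value) {
    return _predicate(value) ? _op(value) : 0; // 0 keeps the pool count accurate
  }
};
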
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
-#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
-
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
-#include "memory/allocation.hpp"
-
-template <typename WriterImpl, u4 ID>
-class JfrArtifactWriterHost : public StackObj {
- private:
-  WriterImpl _impl;
-  JfrCheckpointWriter* _writer;
-  JfrCheckpointContext _ctx;
-  int64_t _count_offset;
-  int _count;
-  bool _skip_header;
- public:
-  JfrArtifactWriterHost(JfrCheckpointWriter* writer,
-                        JfrArtifactSet* artifacts,
-                        bool current_epoch,
-                        bool skip_header = false) : _impl(writer, artifacts, current_epoch),
-                                                    _writer(writer),
-                                                    _ctx(writer->context()),
-                                                    _count(0),
-                                                    _skip_header(skip_header) {
-    assert(_writer != NULL, "invariant");
-    if (!_skip_header) {
-      _writer->write_type((JfrTypeId)ID);
-      _count_offset = _writer->reserve(sizeof(u4)); // Don't know how many yet
-    }
-  }
-
-  ~JfrArtifactWriterHost() {
-    if (_count == 0) {
-      // nothing written, restore context for rewind
-      _writer->set_context(_ctx);
-      return;
-    }
-    assert(_count > 0, "invariant");
-    if (!_skip_header) {
-      _writer->write_count(_count, _count_offset);
-    }
-  }
-
-  bool operator()(typename WriterImpl::Type const & value) {
-    this->_count += _impl(value);
-    return true;
-  }
-
-  int count() const   { return _count; }
-  void add(int count) { _count += count; }
-};
-
-typedef int(*artifact_write_operation)(JfrCheckpointWriter*, JfrArtifactSet*, const void*);
-
-template <typename T, artifact_write_operation op>
-class JfrArtifactWriterImplHost {
- private:
-  JfrCheckpointWriter* _writer;
-  JfrArtifactSet* _artifacts;
-  bool _current_epoch;
- public:
-  typedef T Type;
-  JfrArtifactWriterImplHost(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, bool current_epoch) :
-    _writer(writer), _artifacts(artifacts), _current_epoch(current_epoch) {}
-  int operator()(T const& value) {
-    return op(this->_writer, this->_artifacts, value);
-  }
-};
-
-template <typename T, typename Predicate, artifact_write_operation op>
-class JfrPredicatedArtifactWriterImplHost : public JfrArtifactWriterImplHost<T, op> {
- private:
-  Predicate _predicate;
-  typedef JfrArtifactWriterImplHost<T, op> Parent;
- public:
-  JfrPredicatedArtifactWriterImplHost(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, bool current_epoch) :
-    Parent(writer, artifacts, current_epoch), _predicate(current_epoch) {}
-  int operator()(T const& value) {
-    return _predicate(value) ? Parent::operator()(value) : 0;
-  }
-};
-
-#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
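
The deleted host's central idiom — write the type id, reserve a u4 count slot, then either patch in the real count or rewind the context if nothing was written — presumably carries over to the replacement JfrTypeWriterHost, which is not part of this changeset's visible hunks. A buffer-only sketch of reserve-then-patch:

#include <cstdint>
#include <vector>

struct BufferSketch {
  std::vector<uint8_t> bytes;
  size_t reserve_u4() {                       // leave a 4-byte hole for the count
    size_t offset = bytes.size();
    bytes.resize(offset + 4);
    return offset;
  }
  void patch_u4(size_t offset, uint32_t v) {  // fill the hole once the count is known
    for (int i = 0; i < 4; ++i) {
      bytes[offset + i] = uint8_t(v >> (8 * i));
    }
  }
};
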
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -159,7 +159,7 @@
   // This mechanism will retain the event specific flags
   // in the archive, allowing for event flag restoration
   // when renewing the traceid on klass revival.
-  k->set_trace_id(EVENT_FLAGS_MASK(k));
+  k->set_trace_id(EVENT_KLASS_MASK(k));
 }
 
 // used by CDS / APPCDS as part of "restore_unshareable_info"
@@ -181,12 +181,12 @@
   return get(java_lang_Class::as_Klass(my_oop));
 }
 
-traceid JfrTraceId::use(jclass jc, bool leakp /* false */) {
+traceid JfrTraceId::use(jclass jc) {
   assert(jc != NULL, "invariant");
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
   const oop my_oop = JNIHandles::resolve(jc);
   assert(my_oop != NULL, "invariant");
-  return use(java_lang_Class::as_Klass(my_oop), leakp);
+  return use(java_lang_Class::as_Klass(my_oop));
 }
 
 bool JfrTraceId::in_visible_set(const jclass jc) {
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -90,12 +90,13 @@
   static traceid get(const Thread* thread);
 
   // tag construct as used, returns pre-tagged traceid
-  static traceid use(const Klass* klass, bool leakp = false);
-  static traceid use(jclass jc, bool leakp = false);
-  static traceid use(const Method* method, bool leakp = false);
-  static traceid use(const ModuleEntry* module, bool leakp = false);
-  static traceid use(const PackageEntry* package, bool leakp = false);
-  static traceid use(const ClassLoaderData* cld, bool leakp = false);
+  static traceid use(const Klass* klass);
+  static traceid use(jclass jc);
+  static traceid use(const Method* method);
+  static traceid use(const Klass* klass, const Method* method);
+  static traceid use(const ModuleEntry* module);
+  static traceid use(const PackageEntry* package);
+  static traceid use(const ClassLoaderData* cld);
 
   static void remove(const Klass* klass);
   static void restore(const Klass* klass);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -38,21 +38,11 @@
 #include "utilities/debug.hpp"
 
 template <typename T>
-inline traceid set_used_and_get(const T* type, bool leakp) {
+inline traceid set_used_and_get(const T* type) {
   assert(type != NULL, "invariant");
-  if (leakp) {
-    SET_LEAKP_USED_THIS_EPOCH(type);
-    assert(LEAKP_USED_THIS_EPOCH(type), "invariant");
-  }
   SET_USED_THIS_EPOCH(type);
   assert(USED_THIS_EPOCH(type), "invariant");
-  return TRACE_ID_MASKED_PTR(type);
-}
-
-template <typename T>
-inline traceid set_used_and_get_shifted(const T* type, bool leakp) {
-  assert(type != NULL, "invariant");
-  return set_used_and_get(type, leakp) >> TRACE_ID_SHIFT;
+  return TRACE_ID(type);
 }
 
 inline traceid JfrTraceId::get(const Klass* klass) {
@@ -65,38 +55,38 @@
   return TRACE_ID_RAW(t->jfr_thread_local());
 }
 
-inline traceid JfrTraceId::use(const Klass* klass, bool leakp /* false */) {
+inline traceid JfrTraceId::use(const Klass* klass) {
   assert(klass != NULL, "invariant");
-  return set_used_and_get_shifted(klass, leakp);
+  return set_used_and_get(klass);
 }
 
-inline traceid JfrTraceId::use(const Method* method, bool leakp /* false */) {
+inline traceid JfrTraceId::use(const Method* method) {
+  assert(method != NULL, "invariant");
+  return use(method->method_holder(), method);
+}
+
+inline traceid JfrTraceId::use(const Klass* klass, const Method* method) {
+  assert(klass != NULL, "invariant");
   assert(method != NULL, "invariant");
   SET_METHOD_FLAG_USED_THIS_EPOCH(method);
-  const Klass* const klass = method->method_holder();
-  assert(klass != NULL, "invariant");
-  if (leakp) {
-    SET_LEAKP_USED_THIS_EPOCH(klass);
-    assert(LEAKP_USED_THIS_EPOCH(klass), "invariant");
-  }
   SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
   assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
   return (METHOD_ID(klass, method));
 }
 
-inline traceid JfrTraceId::use(const ModuleEntry* module, bool leakp /* false */) {
+inline traceid JfrTraceId::use(const ModuleEntry* module) {
   assert(module != NULL, "invariant");
-  return set_used_and_get_shifted(module, leakp);
+  return set_used_and_get(module);
 }
 
-inline traceid JfrTraceId::use(const PackageEntry* package, bool leakp /* false */) {
+inline traceid JfrTraceId::use(const PackageEntry* package) {
   assert(package != NULL, "invariant");
-  return set_used_and_get_shifted(package, leakp);
+  return set_used_and_get(package);
 }
 
-inline traceid JfrTraceId::use(const ClassLoaderData* cld, bool leakp /* false */) {
+inline traceid JfrTraceId::use(const ClassLoaderData* cld) {
   assert(cld != NULL, "invariant");
-  return cld->is_unsafe_anonymous() ? 0 : set_used_and_get_shifted(cld, leakp);
+  return cld->is_unsafe_anonymous() ? 0 : set_used_and_get(cld);
 }
 
 inline bool JfrTraceId::in_visible_set(const Klass* klass) {
@@ -112,7 +102,7 @@
 
 inline void JfrTraceId::tag_as_jdk_jfr_event(const Klass* klass) {
   assert(klass != NULL, "invariant");
-  SET_TAG(klass, JDK_JFR_EVENT_KLASS);
+  SET_JDK_JFR_EVENT_KLASS(klass);
   assert(IS_JDK_JFR_EVENT_KLASS(klass), "invariant");
 }
 
@@ -124,7 +114,7 @@
 inline void JfrTraceId::tag_as_jdk_jfr_event_sub(const Klass* k) {
   assert(k != NULL, "invariant");
   if (IS_NOT_AN_EVENT_SUB_KLASS(k)) {
-    SET_TAG(k, JDK_JFR_EVENT_SUBKLASS);
+    SET_JDK_JFR_EVENT_SUBKLASS(k);
   }
   assert(IS_JDK_JFR_EVENT_SUBKLASS(k), "invariant");
 }
@@ -145,7 +135,7 @@
 
 inline void JfrTraceId::tag_as_event_host(const Klass* k) {
   assert(k != NULL, "invariant");
-  SET_TAG(k, EVENT_HOST_KLASS);
+  SET_EVENT_HOST_KLASS(k);
   assert(IS_EVENT_HOST_KLASS(k), "invariant");
 }
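
use(const Klass*, const Method*) above returns METHOD_ID(klass, method), which splices the method's original method-id number into the low bits of the masked klass trace id. A hedged worked example; the mask width is approximated here (the real ALL_BITS_MASK in the macro hunk below clears only the tag/meta bits):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t TRACE_ID_SHIFT = 16;
  const uint64_t klass_id       = 42;                                 // hypothetical klass id
  const uint64_t trace_id       = (klass_id << TRACE_ID_SHIFT) | 0x3; // low bits = tag bits
  const uint64_t masked         = trace_id & ~uint64_t(0xFFFF);       // drop tag/meta space
  const uint64_t method_idnum   = 7;                                  // hypothetical orig_method_idnum
  const uint64_t method_id      = masked | method_idnum;
  assert(method_id == ((42u << 16) | 7));
  return 0;
}
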
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -32,71 +32,56 @@
 
 #ifdef VM_LITTLE_ENDIAN
 static const int low_offset = 0;
-static const int leakp_offset = low_offset + 1;
+static const int meta_offset = low_offset + 1;
 #else
 static const int low_offset = 7;
-static const int leakp_offset = low_offset - 1;
+static const int meta_offset = low_offset - 1;
 #endif
 
 inline void set_bits(jbyte bits, jbyte* const dest) {
   assert(dest != NULL, "invariant");
-  const jbyte current = *dest;
-  if (bits != (current & bits)) {
-    *dest = current | bits;
-
+  if (bits != (*dest & bits)) {
+    *dest |= bits;
     OrderAccess::storestore();
   }
 }
 
-inline void set_bits_cas(jbyte bits, jbyte* const dest) {
+inline jbyte traceid_and(jbyte current, jbyte bits) {
+  return current & bits;
+}
+
+inline jbyte traceid_or(jbyte current, jbyte bits) {
+  return current | bits;
+}
+
+inline jbyte traceid_xor(jbyte current, jbyte bits) {
+  return current ^ bits;
+}
+
+template <jbyte op(jbyte, jbyte)>
+inline void set_bits_cas_form(jbyte bits, jbyte* const dest) {
   assert(dest != NULL, "invariant");
   do {
-    const jbyte current = OrderAccess::load_acquire(dest);
-    if (bits == (current & bits)) {
-      return;
-    }
-    const jbyte new_value = current | bits;
+    const jbyte current = *dest;
+    const jbyte new_value = op(current, bits);
     if (Atomic::cmpxchg(new_value, dest, current) == current) {
       return;
     }
   } while (true);
 }
 
+inline void set_bits_cas(jbyte bits, jbyte* const dest) {
+  set_bits_cas_form<traceid_or>(bits, dest);
+}
+
 inline void clear_bits_cas(jbyte bits, jbyte* const dest) {
-  assert(dest != NULL, "invariant");
-  do {
-    const jbyte current = OrderAccess::load_acquire(dest);
-    if (bits != (current & bits)) {
-      return;
-    }
-    const jbyte new_value = current ^ bits;
-    if (Atomic::cmpxchg(new_value, dest, current) == current) {
-      return;
-    }
-  } while (true);
+  set_bits_cas_form<traceid_xor>(bits, dest);
 }
 
 inline void set_mask(jbyte mask, jbyte* const dest) {
-  assert(dest != NULL, "invariant");
-  const jbyte current = *dest;
-  if (mask != (current & mask)) {
-    *dest = current & mask;
-    OrderAccess::storestore();
-  }
+  set_bits_cas_form<traceid_and>(mask, dest);
 }
-inline void set_mask_cas(jbyte mask, jbyte* const dest) {
-  assert(dest != NULL, "invariant");
-  do {
-    const jbyte current = OrderAccess::load_acquire(dest);
-    if (mask == (current & mask)) {
-      return;
-    }
-    const jbyte new_value = current & mask;
-    if (Atomic::cmpxchg(new_value, dest, current) == current) {
-      return;
-    }
-  } while (true);
-}
+
 inline void set_traceid_bits(jbyte bits, traceid* dest) {
   set_bits(bits, ((jbyte*)dest) + low_offset);
 }
@@ -109,16 +94,28 @@
   set_mask(mask, ((jbyte*)dest) + low_offset);
 }
 
-inline void set_leakp_traceid_bits(jbyte bits, traceid* dest) {
-  set_bits(bits, ((jbyte*)dest) + leakp_offset);
+inline void set_meta_bits(jbyte bits, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  *dest |= bits;
+}
+
+inline void set_traceid_meta_bits(jbyte bits, traceid* dest) {
+  set_meta_bits(bits, ((jbyte*)dest) + meta_offset);
 }
 
-inline void set_leakp_traceid_bits_cas(jbyte bits, traceid* dest) {
-  set_bits_cas(bits, ((jbyte*)dest) + leakp_offset);
+inline void set_meta_mask(jbyte mask, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  *dest &= mask;
 }
 
-inline void set_leakp_traceid_mask(jbyte mask, traceid* dest) {
-  set_mask_cas(mask, ((jbyte*)dest) + leakp_offset);
+inline void set_traceid_meta_mask(jbyte mask, traceid* dest) {
+  set_meta_mask(mask, ((jbyte*)dest) + meta_offset);
+}
+
+// only used by a single thread with no visibility requirements
+inline void clear_meta_bits(jbyte bits, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  *dest ^= bits;
 }
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
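
All three CAS forms now share one retry loop, parameterized by the combining operator. Worth noting that the XOR form only clears bits that are currently set, which is what its call sites guarantee. A tiny demonstration of the three operators on a plain byte:

#include <cassert>
#include <cstdint>

int main() {
  uint8_t b = 0;
  b |= 0x05;           // traceid_or:  set bits (set_bits_cas)
  assert(b == 0x05);
  b &= 0x04;           // traceid_and: keep only masked bits (set_mask)
  assert(b == 0x04);
  b ^= 0x04;           // traceid_xor: clears bits *that are set* (clear_bits_cas)
  assert(b == 0x00);
  return 0;
}
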
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -32,19 +32,8 @@
 #define METHOD_USED_BIT (USED_BIT << 2)
 #define EPOCH_1_SHIFT 0
 #define EPOCH_2_SHIFT 1
-#define SERIALIZED_SHIFT 2
-#define LEAKP_SERIALIZED_SHIFT 3
-#define LEAKP_SHIFT 8
-
 #define USED_EPOCH_1_BIT (USED_BIT << EPOCH_1_SHIFT)
 #define USED_EPOCH_2_BIT (USED_BIT << EPOCH_2_SHIFT)
-#define LEAKP_USED_EPOCH_1_BIT (USED_EPOCH_1_BIT << LEAKP_SHIFT)
-#define LEAKP_USED_EPOCH_2_BIT (USED_EPOCH_2_BIT << LEAKP_SHIFT)
-#define SERIALIZED_BIT (USED_BIT << SERIALIZED_SHIFT)
-#define SERIALIZED_TEST_BIT (SERIALIZED_BIT << LEAKP_SHIFT)
-#define LEAKP_SERIALIZED_BIT (USED_BIT << LEAKP_SERIALIZED_SHIFT)
-#define LEAKP_SERIALIZED_TEST_BIT (LEAKP_SERIALIZED_BIT << LEAKP_SHIFT)
-#define METHOD_LEAKP_SERIALIZED_BIT ((USED_BIT << LEAKP_SERIALIZED_SHIFT))
 #define METHOD_USED_EPOCH_1_BIT (METHOD_USED_BIT << EPOCH_1_SHIFT)
 #define METHOD_USED_EPOCH_2_BIT (METHOD_USED_BIT << EPOCH_2_SHIFT)
 #define METHOD_AND_CLASS_IN_USE_BITS (METHOD_USED_BIT | USED_BIT)
@@ -82,14 +71,6 @@
     return _epoch_state ? USED_EPOCH_1_BIT : USED_EPOCH_2_BIT;
   }
 
-  static traceid leakp_in_use_this_epoch_bit() {
-    return _epoch_state ? LEAKP_USED_EPOCH_2_BIT : LEAKP_USED_EPOCH_1_BIT;
-  }
-
-  static traceid leakp_in_use_prev_epoch_bit() {
-    return _epoch_state ? LEAKP_USED_EPOCH_1_BIT : LEAKP_USED_EPOCH_2_BIT;
-  }
-
   static traceid method_in_use_this_epoch_bit() {
     return _epoch_state ? METHOD_USED_EPOCH_2_BIT : METHOD_USED_EPOCH_1_BIT;
   }
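
The epoch accessors that remain all follow the same pattern: a global boolean selects which of two sibling bits means "this epoch", and the other bit automatically means "previous epoch", so new tagging and previous-epoch draining never contend for the same bit. A minimal sketch of the selection (mapping direction is arbitrary here):

#include <cassert>

int main() {
  const unsigned USED_EPOCH_1_BIT = 1u << 0;
  const unsigned USED_EPOCH_2_BIT = 1u << 1;
  bool epoch_state = false;

  auto this_epoch_bit = [&] { return epoch_state ? USED_EPOCH_2_BIT : USED_EPOCH_1_BIT; };
  auto prev_epoch_bit = [&] { return epoch_state ? USED_EPOCH_1_BIT : USED_EPOCH_2_BIT; };

  assert(this_epoch_bit() != prev_epoch_bit());
  epoch_state = !epoch_state;                   // epoch shift at chunk rotation
  assert(this_epoch_bit() != prev_epoch_bit()); // roles swap, bits never collide
  return 0;
}
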
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -36,176 +36,100 @@
  * If a class member (method) is used, either the third or fourth bit is tagged.
  * Which bit to set is a function of the epoch. This allows for concurrent tagging.
  *
- * LeakProfiler subsystem gets its own byte and uses the same tagging scheme but is shifted up 8.
- *
- * We also tag the individual method by using the TraceFlag field,
+ * We also tag individual methods by using the _trace_flags field,
  * (see jfr/support/jfrTraceIdExtension.hpp for details)
  *
  */
 
-// these are defined in jfr/support/jfrKlassExtension.hpp
+// the following are defined in jfr/support/jfrKlassExtension.hpp
 //
-// #define JDK_JFR_EVENT_SUBKLASS  16
-// #define JDK_JFR_EVENT_KLASS     32
-// #define EVENT_HOST_KLASS        64
-// #define EVENT_RESERVED          128
-
-#define IS_JDK_JFR_EVENT_SUBKLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_SUBKLASS)) != 0)
+// #define JDK_JFR_EVENT_SUBKLASS                 16
+// #define JDK_JFR_EVENT_KLASS                    32
+// #define EVENT_HOST_KLASS                       64
 
-#define IS_SERIALIZED(ptr)        (((ptr)->trace_id() & (SERIALIZED_TEST_BIT)) != 0)
-#define IS_NOT_SERIALIZED(ptr)    (!IS_SERIALIZED(ptr))
-#define IS_LEAKP_SERIALIZED(ptr)  (((ptr)->trace_id() & (LEAKP_SERIALIZED_TEST_BIT)) != 0)
-#define IS_NOT_LEAKP_SERIALIZED(ptr) (!IS_LEAKP_SERIALIZED(ptr))
-#define ANY_USED_BITS (USED_EPOCH_2_BIT         | \
-                       USED_EPOCH_1_BIT         | \
-                       METHOD_USED_EPOCH_2_BIT  | \
-                       METHOD_USED_EPOCH_1_BIT  | \
-                       LEAKP_USED_EPOCH_1_BIT    | \
-                       LEAKP_USED_EPOCH_2_BIT   | \
-                       SERIALIZED_TEST_BIT       | \
-                       LEAKP_SERIALIZED_TEST_BIT)
-
-#define TRACE_ID_META_BITS (EVENT_RESERVED | EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS | ANY_USED_BITS)
-
-#define ANY_EVENT                       (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
-#define IS_JDK_JFR_EVENT_KLASS(ptr)     (((ptr)->trace_id() & JDK_JFR_EVENT_KLASS) != 0)
-#define IS_EVENT_HOST_KLASS(ptr)        (((ptr)->trace_id() & EVENT_HOST_KLASS) != 0)
-#define IS_NOT_AN_EVENT_KLASS(ptr)      (!IS_EVENT_KLASS(ptr))
-#define IS_NOT_AN_EVENT_SUB_KLASS(ptr)  (!IS_JDK_JFR_EVENT_SUBKLASS(ptr))
-#define IS_NOT_JDK_JFR_EVENT_KLASS(ptr) (!IS_JDK_JFR_EVENT_KLASS(ptr))
-#define EVENT_FLAGS_MASK(ptr)           (((ptr)->trace_id() & ANY_EVENT) != 0)
-#define UNEVENT(ptr)                    ((ptr)->set_trace_id(((ptr)->trace_id()) & ~ANY_EVENT))
-
-#define TRACE_ID_SHIFT 16
+// static bits
+#define META_SHIFT                                8
+#define SERIALIZED_BIT                            ((USED_BIT << 1) << META_SHIFT)
+#define TRANSIENT_BIT                             (USED_BIT << META_SHIFT)
+#define TRACE_ID_SHIFT                            16
+#define METHOD_ID_NUM_MASK                        ((1 << TRACE_ID_SHIFT) - 1)
+#define META_BITS                                 (SERIALIZED_BIT | TRANSIENT_BIT)
+#define EVENT_BITS                                (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
+#define USED_BITS                                 (METHOD_USED_EPOCH_2_BIT | METHOD_USED_EPOCH_1_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)
+#define ALL_BITS                                  (META_BITS | EVENT_BITS | USED_BITS)
+#define ALL_BITS_MASK                             (~ALL_BITS)
 
-#define TRACE_ID_MASKED(id)             (id & ~TRACE_ID_META_BITS)
-#define TRACE_ID_VALUE(id)              (TRACE_ID_MASKED(id) >> TRACE_ID_SHIFT)
-#define TRACE_ID_MASKED_PTR(ptr)        (TRACE_ID_MASKED((ptr)->trace_id()))
-#define TRACE_ID_RAW(ptr)               ((ptr)->trace_id())
-#define TRACE_ID(ptr)                   (TRACE_ID_MASKED_PTR(ptr) >> TRACE_ID_SHIFT)
-#define METHOD_ID(kls, meth)            (TRACE_ID_MASKED_PTR(kls) | (meth)->orig_method_idnum())
-#define SET_TAG(ptr, tag)               (set_traceid_bits(tag, (ptr)->trace_id_addr()))
-#define SET_TAG_CAS(ptr, tag)           (set_traceid_bits_cas(tag, (ptr)->trace_id_addr()))
-#define SET_LEAKP_TAG_CAS(ptr, tag)     (set_leakp_traceid_bits_cas(tag, (ptr)->trace_id_addr()))
-#define SET_LEAKP_TAG(ptr, tag)         (SET_LEAKP_TAG_CAS(ptr, tag))
-
-#define SET_SERIALIZED(ptr)             (set_leakp_traceid_bits_cas((jbyte)SERIALIZED_BIT, (ptr)->trace_id_addr()))
-#define SET_LEAKP_SERIALIZED(ptr)       (set_leakp_traceid_bits_cas((jbyte)LEAKP_SERIALIZED_BIT, (ptr)->trace_id_addr()))
-#define UNSERIALIZE_MASK                (~(SERIALIZED_BIT | LEAKP_SERIALIZED_BIT))
-#define IN_USE_THIS_EPOCH_BIT           (JfrTraceIdEpoch::in_use_this_epoch_bit())
-#define IN_USE_THIS_EPOCH_UNSERIALIZED_BIT (IN_USE_THIS_EPOCH_BIT | SERIALIZED_TEST_BIT)
-#define IN_USE_PREV_EPOCH_BIT           (JfrTraceIdEpoch::in_use_prev_epoch_bit())
-#define LEAKP_IN_USE_THIS_EPOCH_BIT     (JfrTraceIdEpoch::leakp_in_use_this_epoch_bit())
-#define LEAKP_IN_USE_PREV_EPOCH_BIT     (JfrTraceIdEpoch::leakp_in_use_prev_epoch_bit())
+// epoch relative bits
+#define IN_USE_THIS_EPOCH_BIT                     (JfrTraceIdEpoch::in_use_this_epoch_bit())
+#define IN_USE_PREV_EPOCH_BIT                     (JfrTraceIdEpoch::in_use_prev_epoch_bit())
+#define METHOD_IN_USE_THIS_EPOCH_BIT              (JfrTraceIdEpoch::method_in_use_this_epoch_bit())
+#define METHOD_IN_USE_PREV_EPOCH_BIT              (JfrTraceIdEpoch::method_in_use_prev_epoch_bit())
+#define METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS   (JfrTraceIdEpoch::method_and_class_in_use_this_epoch_bits())
+#define METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS   (JfrTraceIdEpoch::method_and_class_in_use_prev_epoch_bits())
+#define METHOD_FLAG_IN_USE_THIS_EPOCH_BIT         ((jbyte)IN_USE_THIS_EPOCH_BIT)
+#define METHOD_FLAG_IN_USE_PREV_EPOCH_BIT         ((jbyte)IN_USE_PREV_EPOCH_BIT)
 
-#define METHOD_IN_USE_THIS_EPOCH_BIT    (JfrTraceIdEpoch::method_in_use_this_epoch_bit())
-#define METHOD_IN_USE_PREV_EPOCH_BIT    (JfrTraceIdEpoch::method_in_use_prev_epoch_bit())
-#define METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_this_epoch_bits())
-#define METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_prev_epoch_bits())
-
-#define UNUSE_THIS_EPOCH_MASK           (~(IN_USE_THIS_EPOCH_BIT))
-#define UNUSE_PREV_EPOCH_MASK           (~(IN_USE_PREV_EPOCH_BIT))
-#define LEAKP_UNUSE_THIS_EPOCH_MASK     UNUSE_THIS_EPOCH_MASK
-#define LEAKP_UNUSE_PREV_EPOCH_MASK     UNUSE_PREV_EPOCH_MASK
-
-#define UNUSE_METHOD_THIS_EPOCH_MASK    (~(METHOD_IN_USE_THIS_EPOCH_BIT))
-#define UNUSE_METHOD_PREV_EPOCH_MASK    (~(METHOD_IN_USE_PREV_EPOCH_BIT))
-#define LEAKP_UNUSE_METHOD_THIS_EPOCH_MASK (~(UNUSE_METHOD_THIS_EPOCH_MASK))
-#define LEAKP_UNUSE_METHOD_PREV_EPOCH_MASK (~UNUSE_METHOD_PREV_EPOCH_MASK))
-
-#define UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK (~(METHOD_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT))
-#define UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK (~(METHOD_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT))
-
-#define SET_USED_THIS_EPOCH(ptr)        (SET_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
-#define SET_USED_PREV_EPOCH(ptr)        (SET_TAG_CAS(ptr, IN_USE_PREV_EPOCH_BIT))
-#define SET_LEAKP_USED_THIS_EPOCH(ptr)  (SET_LEAKP_TAG_CAS(ptr, IN_USE_THIS_EPOCH_BIT))
-#define SET_LEAKP_USED_PREV_EPOCH(ptr)  (SET_LEAKP_TAG_CAS(ptr, IN_USE_PREV_EPOCH_BIT))
-#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (SET_TAG(kls, METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS))
+// operators
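+// TRACE_ID_* operate on the klass trace id word; METHOD_FLAG_* and
+// METHOD_META_* operate on the Method's trace flags and trace meta fields
+// (hence the jbyte casts above).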
+#define TRACE_ID_RAW(ptr)                         ((ptr)->trace_id())
+#define TRACE_ID(ptr)                             (TRACE_ID_RAW(ptr) >> TRACE_ID_SHIFT)
+#define TRACE_ID_MASKED(ptr)                      (TRACE_ID_RAW(ptr) & ALL_BITS_MASK)
+#define TRACE_ID_PREDICATE(ptr, bits)             ((TRACE_ID_RAW(ptr) & bits) != 0)
+#define TRACE_ID_TAG(ptr, bits)                   (set_traceid_bits(bits, (ptr)->trace_id_addr()))
+#define TRACE_ID_TAG_CAS(ptr, bits)               (set_traceid_bits_cas(bits, (ptr)->trace_id_addr()))
+#define TRACE_ID_CLEAR(ptr, bits)                 (set_traceid_mask(bits, (ptr)->trace_id_addr()))
+#define TRACE_ID_META_TAG(ptr, bits)              (set_traceid_meta_bits(bits, (ptr)->trace_id_addr()))
+#define TRACE_ID_META_CLEAR(ptr, bits)            (set_traceid_meta_mask(bits, (ptr)->trace_id_addr()))
+#define METHOD_ID(kls, method)                    (TRACE_ID_MASKED(kls) | (method)->orig_method_idnum())
+#define METHOD_FLAG_PREDICATE(method, bits)       ((method)->is_trace_flag_set(bits))
+#define METHOD_FLAG_TAG(method, bits)             (set_bits(bits, (method)->trace_flags_addr()))
+#define METHOD_META_TAG(method, bits)             (set_meta_bits(bits, (method)->trace_meta_addr()))
+#define METHOD_FLAG_CLEAR(method, bits)           (clear_bits_cas(bits, (method)->trace_flags_addr()))
+#define METHOD_META_CLEAR(method, bits)           (clear_meta_bits(bits, (method)->trace_meta_addr()))
 
-#define USED_THIS_EPOCH(ptr)            (((ptr)->trace_id() & IN_USE_THIS_EPOCH_BIT) != 0)
-#define USED_THIS_EPOCH_UNSERIALIZED(ptr) ((USED_THIS_EPOCH(ptr)) && (IS_NOT_SERIALIZED(ptr)))
-#define NOT_USED_THIS_EPOCH(ptr)        (!USED_THIS_EPOCH(ptr))
-#define USED_PREV_EPOCH(ptr)            (((ptr)->trace_id() & IN_USE_PREV_EPOCH_BIT) != 0)
-#define USED_PREV_EPOCH_UNSERIALIZED(ptr) ((USED_PREV_EPOCH(ptr)) && (IS_NOT_SERIALIZED(ptr)))
-#define NOT_USED_PREV_EPOCH(ptr)        (!USED_PREV_EPOCH(ptr))
-#define USED_ANY_EPOCH(ptr)             (((ptr)->trace_id() & (USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)) != 0)
-#define NOT_USED_ANY_EPOCH(ptr)         (!USED_ANY_EPOCH(ptr))
-
-#define LEAKP_USED_THIS_EPOCH(ptr)      (((ptr)->trace_id() & LEAKP_IN_USE_THIS_EPOCH_BIT) != 0)
-#define LEAKP_NOT_USED_THIS_EPOCH(ptr)  (!LEAKP_USED_THIS_EPOCH(ptr))
-#define LEAKP_USED_PREV_EPOCH(ptr)      (((ptr)->trace_id() & LEAKP_IN_USE_PREV_EPOCH_BIT) != 0)
-#define LEAKP_NOT_USED_PREV_EPOCH(ptr)  (!LEAKP_USED_PREV_EPOCH(ptr))
-#define LEAKP_USED_ANY_EPOCH(ptr)       (((ptr)->trace_id() & (LEAKP_USED_EPOCH_2_BIT | LEAKP_USED_EPOCH_1_BIT)) != 0)
-#define LEAKP_NOT_USED_ANY_EPOCH(ptr)   (!LEAKP_USED_ANY_EPOCH(ptr))
-
-#define ANY_USED_THIS_EPOCH(ptr)        (((ptr)->trace_id() & (LEAKP_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT)) != 0)
-#define ANY_USED_THIS_EPOCH_UNSERIALIZED(ptr) ((ANY_USED_THIS_EPOCH(ptr)) && (IS_NOT_SERIALIZED(ptr)))
-#define ANY_NOT_USED_THIS_EPOCH(ptr)    (!ANY_USED_THIS_EPOCH(ptr))
-#define ANY_USED_PREV_EPOCH(ptr)        (((ptr)->trace_id() & (LEAKP_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT)) != 0)
-#define ANY_USED_PREV_EPOCH_UNSERIALIZED(ptr) ((ANY_USED_PREV_EPOCH(ptr)) && (IS_NOT_SERIALIZED(ptr)))
-#define ANY_NOT_USED_PREV_EPOCH(ptr)    (!ANY_USED_PREV_EPOCH(ptr))
-
-#define METHOD_USED_THIS_EPOCH(kls)     (((kls)->trace_id() & METHOD_IN_USE_THIS_EPOCH_BIT) != 0)
-#define METHOD_NOT_USED_THIS_EPOCH(kls) (!METHOD_USED_THIS_EPOCH(kls))
-#define METHOD_USED_PREV_EPOCH(kls)     (((kls)->trace_id() & METHOD_IN_USE_PREV_EPOCH_BIT) != 0)
-#define METHOD_NOT_USED_PREV_EPOCH(kls) (!METHOD_USED_PREV_EPOCH(kls))
-#define METHOD_USED_ANY_EPOCH(kls)      (((kls)->trace_id() & (METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)) != 0)
-
-#define METHOD_NOT_USED_ANY_EPOCH(kls)  (!METHOD_USED_ANY_EPOCH(kls))
-
-#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls) ((((kls)->trace_id() & METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS) == \
-                                                                     METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS) != 0)
-
-#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls) ((((kls)->trace_id() & METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS) == \
-                                                                     METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS) != 0)
-
-#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls)     ((METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls)) != 0)
-#define METHOD_AND_CLASS_NOT_USED_ANY_EPOCH(kls) (!METHOD_AND_CLASS_USED_ANY_EPOCH(kls))
+// predicates
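+// Each predicate also tests TRANSIENT_BIT, so artifacts tagged as transient
+// always read as in use, independent of the current epoch.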
+#define USED_THIS_EPOCH(ptr)                      (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_THIS_EPOCH_BIT)))
+#define USED_PREV_EPOCH(ptr)                      (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_PREV_EPOCH_BIT)))
+#define USED_ANY_EPOCH(ptr)                       (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)))
+#define METHOD_USED_THIS_EPOCH(kls)               (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_USED_PREV_EPOCH(kls)               (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_IN_USE_PREV_EPOCH_BIT)))
+#define METHOD_USED_ANY_EPOCH(kls)                (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls)     (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS)))
+#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls)     (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS)))
+#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls)      (METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls))
+#define METHOD_FLAG_USED_THIS_EPOCH(method)       (METHOD_FLAG_PREDICATE(method, (TRANSIENT_BIT | METHOD_FLAG_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_FLAG_USED_PREV_EPOCH(method)       (METHOD_FLAG_PREDICATE(method, (TRANSIENT_BIT | METHOD_FLAG_IN_USE_PREV_EPOCH_BIT)))
 
-#define LEAKP_METHOD_IN_USE_THIS_EPOCH  (LEAKP_IN_USE_THIS_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)
-#define LEAKP_METHOD_IN_USE_PREV_EPOCH  (LEAKP_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_PREV_EPOCH_BIT)
-#define LEAKP_METHOD_USED_THIS_EPOCH(ptr)  ((((ptr)->trace_id() & LEAKP_METHOD_IN_USE_THIS_EPOCH) == \
-                                                                  LEAKP_METHOD_IN_USE_THIS_EPOCH) != 0)
-#define LEAKP_METHOD_NOT_USED_THIS_EPOCH(kls) (!LEAKP_METHOD_USED_THIS_EPOCH(kls))
-#define LEAKP_METHOD_USED_PREV_EPOCH(ptr)  ((((ptr)->trace_id() & LEAKP_METHOD_IN_USE_PREV_EPOCH) == \
-                                                                  LEAKP_METHOD_IN_USE_PREV_EPOCH) != 0)
-#define LEAKP_METHOD_NOT_USED_PREV_EPOCH(kls) (!LEAKP_METHOD_USED_PREV_EPOCH(kls))
-
-#define UNUSE_THIS_EPOCH(ptr)           (set_traceid_mask(UNUSE_THIS_EPOCH_MASK, (ptr)->trace_id_addr()))
-#define UNUSE_PREV_EPOCH(ptr)           (set_traceid_mask(UNUSE_PREV_EPOCH_MASK, (ptr)->trace_id_addr()))
-#define UNUSE_METHOD_THIS_EPOCH(kls)    (set_traceid_mask(UNUSE_METHOD_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
-#define UNUSE_METHOD_PREV_EPOCH(kls)    (set_traceid_mask(UNUSE_METHOD_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
-
-#define LEAKP_UNUSE_THIS_EPOCH(ptr)     (set_leakp_traceid_mask(UNUSE_THIS_EPOCH_MASK, (ptr)->trace_id_addr()))
-#define LEAKP_UNUSE_PREV_EPOCH(ptr)     (set_leakp_traceid_mask(UNUSE_PREV_EPOCH_MASK, (ptr)->trace_id_addr()))
-#define LEAKP_UNUSE_METHOD_THIS_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
-#define LEAKP_UNUSE_METHOD_PREV_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
-
-#define UNSERIALIZE(ptr)                (set_leakp_traceid_mask(UNSERIALIZE_MASK, (ptr)->trace_id_addr()))
-#define ANY_USED(ptr)                   (((ptr)->trace_id() & ANY_USED_BITS) != 0)
-#define ANY_NOT_USED(ptr)               (!ANY_USED(ptr))
+// setters
+#define SET_USED_THIS_EPOCH(ptr)                  (TRACE_ID_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
+#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_TAG(kls, METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS))
+#define SET_METHOD_FLAG_USED_THIS_EPOCH(method)   (METHOD_FLAG_TAG(method, METHOD_FLAG_IN_USE_THIS_EPOCH_BIT))
+#define CLEAR_METHOD_AND_CLASS_THIS_EPOCH_MASK    (~(METHOD_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT))
+#define CLEAR_METHOD_AND_CLASS_PREV_EPOCH_MASK    (~(METHOD_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT))
+#define CLEAR_METHOD_AND_CLASS_THIS_EPOCH(kls)    (TRACE_ID_CLEAR(kls, CLEAR_METHOD_AND_CLASS_THIS_EPOCH_MASK))
+#define CLEAR_METHOD_AND_CLASS_PREV_EPOCH(kls)    (TRACE_ID_CLEAR(kls, CLEAR_METHOD_AND_CLASS_PREV_EPOCH_MASK))
+#define CLEAR_METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_CLEAR(method, METHOD_FLAG_IN_USE_THIS_EPOCH_BIT))
+#define CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method) (METHOD_FLAG_CLEAR(method, METHOD_FLAG_IN_USE_PREV_EPOCH_BIT))
 
-#define UNUSE_METHOD_AND_CLASS_THIS_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
-#define LEAKP_UNUSE_METHOD_AND_CLASS_THIS_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
-#define UNUSE_METHOD_AND_CLASS_PREV_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
-#define LEAKP_UNUSE_METHODS_AND_CLASS_PREV_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
+// types
+#define IS_JDK_JFR_EVENT_KLASS(kls)               (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_KLASS))
+#define IS_JDK_JFR_EVENT_SUBKLASS(kls)            (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_SUBKLASS))
+#define IS_NOT_AN_EVENT_SUB_KLASS(kls)            (!IS_JDK_JFR_EVENT_SUBKLASS(kls))
+#define IS_EVENT_HOST_KLASS(kls)                  (TRACE_ID_PREDICATE(kls, EVENT_HOST_KLASS))
+#define SET_JDK_JFR_EVENT_KLASS(kls)              (TRACE_ID_TAG(kls, JDK_JFR_EVENT_KLASS))
+#define SET_JDK_JFR_EVENT_SUBKLASS(kls)           (TRACE_ID_TAG(kls, JDK_JFR_EVENT_SUBKLASS))
+#define SET_EVENT_HOST_KLASS(kls)                 (TRACE_ID_TAG(kls, EVENT_HOST_KLASS))
+#define EVENT_KLASS_MASK(kls)                     (TRACE_ID_RAW(kls) & EVENT_BITS)
 
-#define IS_METHOD_SERIALIZED(m)              ((m)->is_trace_flag_set((jbyte)SERIALIZED_BIT))
-#define METHOD_NOT_SERIALIZED(m)             (!IS_METHOD_SERIALIZED(m))
-#define SET_METHOD_SERIALIZED(m)             (set_bits_cas((jbyte)SERIALIZED_BIT, (m)->trace_flags_addr()))
-#define UNSERIALIZE_METHOD(m)                (clear_bits_cas((jbyte)SERIALIZED_BIT, (m)->trace_flags_addr()))
-#define IS_LEAKP_METHOD_SERIALIZED(m)        ((m)->is_trace_flag_set((jbyte)LEAKP_SERIALIZED_BIT))
-#define METHOD_NOT_LEAKP_SERIALIZED(m)       (!IS_LEAKP_METHOD_SERIALIZED(m))
-#define SET_LEAKP_METHOD_SERIALIZED(m)       (set_bits_cas((jbyte)LEAKP_SERIALIZED_BIT, (m)->trace_flags_addr()))
-#define UNSERIALIZE_LEAKP_METHOD(m)          (clear_bits_cas((jbyte)LEAKP_SERIALIZED_BIT, (m)->trace_flags_addr()))
-#define METHOD_FLAG_USED_THIS_EPOCH(m)       ((m)->is_trace_flag_set((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit()))
-#define METHOD_FLAG_NOT_USED_THIS_EPOCH(m)   (!METHOD_FLAG_USED_THIS_EPOCH(m))
-#define SET_METHOD_FLAG_USED_THIS_EPOCH(m)   (set_bits_cas((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit(), (m)->trace_flags_addr()))
-#define METHOD_FLAG_USED_PREV_EPOCH(m)       ((m)->is_trace_flag_set((jbyte)JfrTraceIdEpoch::in_use_prev_epoch_bit()))
-#define METHOD_FLAG_NOT_USED_PREV_EPOCH(m)   (!METHOD_FLAG_USED_PREV_EPOCH(m))
-#define METHOD_FLAG_USED_ANY_EPOCH(m)        ((METHOD_FLAG_USED_THIS_EPOCH(m) || METHOD_FLAG_USED_PREV_EPOCH(m)) != 0)
-#define METHOD_FLAG_NOT_USED_ANY_EPOCH(m)    ((METHOD_FLAG_NOT_USED_THIS_EPOCH(m) && METHOD_FLAG_NOT_USED_PREV_EPOCH(m)) != 0)
-#define CLEAR_METHOD_FLAG_USED_THIS_EPOCH(m) (clear_bits_cas((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit() | SERIALIZED_BIT, (m)->trace_flags_addr()))
-#define CLEAR_METHOD_FLAG_USED_PREV_EPOCH(m) (clear_bits_cas((jbyte)JfrTraceIdEpoch::in_use_prev_epoch_bit() | SERIALIZED_BIT, (m)->trace_flags_addr()))
+// meta
+#define SET_TRANSIENT(ptr)                        (TRACE_ID_META_TAG(ptr, USED_BIT))
+#define IS_SERIALIZED(ptr)                        (TRACE_ID_PREDICATE(ptr, SERIALIZED_BIT))
+#define IS_NOT_SERIALIZED(ptr)                    (!IS_SERIALIZED(ptr))
+#define SET_SERIALIZED(ptr)                       (TRACE_ID_META_TAG(ptr, (USED_BIT << 1)))
+#define CLEAR_SERIALIZED(ptr)                     (TRACE_ID_META_CLEAR(ptr, (~(USED_BIT << 1 | USED_BIT))))
+#define IS_METHOD_SERIALIZED(method)              (METHOD_FLAG_PREDICATE(method, SERIALIZED_BIT))
+#define METHOD_NOT_SERIALIZED(method)             (!IS_METHOD_SERIALIZED(method))
+#define SET_METHOD_TRANSIENT(method)              (METHOD_META_TAG(method, USED_BIT))
+#define SET_METHOD_SERIALIZED(method)             (METHOD_META_TAG(method, (USED_BIT << 1)))
+#define CLEAR_METHOD_SERIALIZED(method)           (METHOD_META_CLEAR(method, (USED_BIT << 1 | USED_BIT)))
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
--- a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -194,9 +194,6 @@
     if (!validate_recording_options(thread)) {
       return false;
     }
-    if (!JfrJavaEventWriter::initialize()) {
-      return false;
-    }
     if (!JfrOptionSet::configure(thread)) {
       return false;
     }
@@ -246,6 +243,9 @@
   ResourceMark rm;
   HandleMark hm;
 
+  if (!create_java_event_writer()) {
+    return false;
+  }
   if (!create_jvmti_agent()) {
     return false;
   }
@@ -287,6 +287,10 @@
 static JfrOSInterface* _os_interface = NULL;
 static JfrThreadSampling* _thread_sampling = NULL;
 
+bool JfrRecorder::create_java_event_writer() {
+  return JfrJavaEventWriter::initialize();
+}
+
 bool JfrRecorder::create_jvmti_agent() {
   return JfrOptionSet::allow_retransforms() ? JfrJvmtiAgent::create() : true;
 }
--- a/src/hotspot/share/jfr/recorder/jfrRecorder.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/jfrRecorder.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -40,6 +40,7 @@
  private:
   static bool create_checkpoint_manager();
   static bool create_chunk_repository();
+  static bool create_java_event_writer();
   static bool create_jvmti_agent();
   static bool create_os_interface();
   static bool create_post_box();
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -32,6 +32,10 @@
 #include "runtime/os.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
+static const char* const MAGIC = "FLR";
+static const u2 JFR_VERSION_MAJOR = 2;
+static const u2 JFR_VERSION_MINOR = 0;
+
 static const u1 GUARD = 0xff;
 
 static jlong nanos_now() {
@@ -66,6 +70,29 @@
   _generation = 1;
 }
 
+const char* JfrChunk::magic() const {
+  return MAGIC;
+}
+
+u2 JfrChunk::major_version() const {
+  return JFR_VERSION_MAJOR;
+}
+
+u2 JfrChunk::minor_version() const {
+  return JFR_VERSION_MINOR;
+}
+
+u2 JfrChunk::capabilities() const {
+  // chunk capabilities, CompressedIntegers etc.
+  static bool compressed_integers = JfrOptionSet::compressed_integers();
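+  // The bool widens to u2: bit 0 set signals compressed integer encoding.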
+  return compressed_integers;
+}
+
+int64_t JfrChunk::cpu_frequency() const {
+  static const jlong frequency = JfrTime::frequency();
+  return frequency;
+}
+
 void JfrChunk::set_last_checkpoint_offset(int64_t offset) {
   _last_checkpoint_offset = offset;
 }
@@ -190,3 +217,8 @@
   return this_generation;
 }
 
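+// GUARD (0xff) marks an in-progress chunk header, so the generation counter
+// never hands it out; a value that would reach GUARD wraps back to 1.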
+u1 JfrChunk::next_generation() const {
+  assert(_generation > 0, "invariant");
+  const u1 next_gen = _generation;
+  return GUARD == next_gen ? 1 : next_gen;
+}
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunk.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunk.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -45,13 +45,19 @@
 
   JfrChunk();
   ~JfrChunk();
+  void reset();
+
+  const char* magic() const;
+  u2 major_version() const;
+  u2 minor_version() const;
+  int64_t cpu_frequency() const;
+  u2 capabilities() const;
 
   void update_start_ticks();
   void update_start_nanos();
   void save_current_and_update_start_ticks();
   void save_current_and_update_start_nanos();
 
-  void reset();
   int64_t last_checkpoint_offset() const;
   void set_last_checkpoint_offset(int64_t offset);
 
@@ -78,6 +84,7 @@
 
   int64_t duration() const;
   u1 generation() const;
+  u1 next_generation() const;
 };
 
 #endif // SHARE_VM_JFR_RECORDER_REPOSITORY_JFRRCHUNK_HPP
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -33,9 +33,6 @@
 #include "runtime/os.hpp"
 #include "runtime/os.inline.hpp"
 
-static const u2 JFR_VERSION_MAJOR = 2;
-static const u2 JFR_VERSION_MINOR = 0;
-
 static const int64_t MAGIC_OFFSET = 0;
 static const int64_t MAGIC_LEN = 4;
 static const int64_t VERSION_OFFSET = MAGIC_LEN;
@@ -51,60 +48,54 @@
 static const int64_t CAPABILITY_OFFSET = GENERATION_OFFSET + 2;
 static const int64_t HEADER_SIZE = CAPABILITY_OFFSET + 2;
 static const int64_t RESERVE_SIZE = GENERATION_OFFSET - (4 * SIZE_OFFSET);
-static const int64_t VOLATILE_FIELD_SIZE = SLOT_SIZE * 2;
 
 static const u1 COMPLETE = 0;
 static const u1 GUARD = 0xff;
 static const u1 PAD = 0;
-static const size_t GENERATION_SIZE = sizeof(u2);
-static const size_t HEAD_BUFFER_SIZE = HEADER_SIZE + SLOT_SIZE;
 
 typedef NoOwnershipAdapter JfrHeadBuffer; // stack local array as buffer
 typedef StreamWriterHost<JfrHeadBuffer, StackObj> JfrBufferedHeadWriter;
 typedef WriterHost<BigEndianEncoder, BigEndianEncoder, JfrBufferedHeadWriter> JfrHeadWriterBase;
 
-static uint8_t head_buffer[HEAD_BUFFER_SIZE] = {0};
 
 static fio_fd open_chunk(const char* path) {
   return path != NULL ? os::open(path, O_CREAT | O_RDWR, S_IREAD | S_IWRITE) : invalid_fd;
 }
 
+#ifdef ASSERT
+static void assert_writer_position(JfrChunkWriter* writer, int64_t offset) {
+  assert(writer != NULL, "invariant");
+  assert(offset == writer->current_offset(), "invariant");
+}
+#endif
+
 class JfrChunkHeadWriter : public StackObj {
-  friend class JfrChunkWriter;
  private:
   JfrChunkWriter* _writer;
   JfrChunk* _chunk;
-
+ public:
   void write_magic() {
-    assert(MAGIC_OFFSET == _writer->current_offset(), "invariant");
-    _writer->bytes("FLR", MAGIC_LEN);
+    _writer->bytes(_chunk->magic(), MAGIC_LEN);
   }
 
   void write_version() {
-    assert(VERSION_OFFSET == _writer->current_offset(), "invariant");
-    _writer->be_write((u2)JFR_VERSION_MAJOR);
-    _writer->be_write((u2)JFR_VERSION_MINOR);
+    _writer->be_write(_chunk->major_version());
+    _writer->be_write(_chunk->minor_version());
   }
 
   void write_size(int64_t size) {
-    assert(SIZE_OFFSET == _writer->current_offset(), "invariant");
     _writer->be_write(size);
   }
 
   void write_checkpoint() {
-    assert(CHECKPOINT_OFFSET == _writer->current_offset(), "invariant");
     _writer->be_write(_chunk->last_checkpoint_offset());
   }
 
   void write_metadata() {
-    assert(METADATA_OFFSET == _writer->current_offset(), "invariant");
     _writer->be_write(_chunk->last_metadata_offset());
   }
 
   void write_time(bool finalize) {
-    assert(_writer->is_valid(), "invariant");
-    assert(_chunk != NULL, "invariant");
-    assert(START_NANOS_OFFSET == _writer->current_offset(), "invariant");
     if (finalize) {
       _writer->be_write(_chunk->previous_start_nanos());
       _writer->be_write(_chunk->last_chunk_duration());
@@ -117,68 +108,64 @@
   }
 
   void write_cpu_frequency() {
-    assert(CPU_FREQUENCY_OFFSET == _writer->current_offset(), "invariant");
-    static const jlong frequency = JfrTime::frequency();
-    _writer->be_write(frequency);
-  }
-
-  void write_capabilities() {
-    assert(CAPABILITY_OFFSET == _writer->current_offset(), "invariant");
-    // chunk capabilities, CompressedIntegers etc
-    static bool compressed_integers = JfrOptionSet::compressed_integers();
-    _writer->be_write(compressed_integers ? (u2)1 : (u2)0);
+    _writer->be_write(_chunk->cpu_frequency());
   }
 
   void write_generation(bool finalize) {
-    assert(GENERATION_OFFSET == _writer->current_offset(), "invariant");
     _writer->be_write(finalize ? COMPLETE : _chunk->generation());
     _writer->be_write(PAD);
   }
 
+  void write_next_generation() {
+    _writer->be_write(_chunk->next_generation());
+    _writer->be_write(PAD);
+  }
+
   void write_guard() {
-    assert(GENERATION_OFFSET == _writer->current_offset(), "invariant");
     _writer->be_write(GUARD);
     _writer->be_write(PAD);
   }
 
   void write_guard_flush() {
-    assert(GENERATION_OFFSET == _writer->current_offset(), "invariant");
     write_guard();
     _writer->flush();
   }
 
-  void initialize() {
-    assert(_writer->is_valid(), "invariant");
-    assert(_chunk != NULL, "invariant");
-    assert(0 == _writer->current_offset(), "invariant");
-    write_magic();
-    write_version();
-    write_size(HEADER_SIZE);
-    write_checkpoint();
-    write_metadata();
-    write_time(false);
-    write_cpu_frequency();
-    write_generation(false);
-    write_capabilities();
-    assert(HEADER_SIZE == _writer->current_offset(), "invariant");
-    _writer->flush();
+  void write_capabilities() {
+    _writer->be_write(_chunk->capabilities());
   }
 
-  void flush(int64_t size, bool finalize) {
-    assert(_writer->is_valid(), "invariant");
-    assert(_chunk != NULL, "invariant");
-    assert(SIZE_OFFSET == _writer->current_offset(), "invariant");
+  void write_size_to_generation(int64_t size, bool finalize) {
     write_size(size);
     write_checkpoint();
     write_metadata();
     write_time(finalize);
     write_cpu_frequency();
     write_generation(finalize);
+  }
+
+  void flush(int64_t size, bool finalize) {
+    assert(_writer->is_valid(), "invariant");
+    assert(_chunk != NULL, "invariant");
+    DEBUG_ONLY(assert_writer_position(_writer, SIZE_OFFSET);)
+    write_size_to_generation(size, finalize);
     // no need to write capabilities
     _writer->seek(size); // implicit flush
   }
 
-  JfrChunkHeadWriter(JfrChunkWriter* writer, int64_t offset) : _writer(writer), _chunk(writer->_chunk) {
+  void initialize() {
+    assert(_writer->is_valid(), "invariant");
+    assert(_chunk != NULL, "invariant");
+    DEBUG_ONLY(assert_writer_position(_writer, 0);)
+    write_magic();
+    write_version();
+    write_size_to_generation(HEADER_SIZE, false);
+    write_capabilities();
+    DEBUG_ONLY(assert_writer_position(_writer, HEADER_SIZE);)
+    _writer->flush();
+  }
+
+  JfrChunkHeadWriter(JfrChunkWriter* writer, int64_t offset, bool head = true) : _writer(writer), _chunk(writer->_chunk) {
     assert(_writer != NULL, "invariant");
     assert(_writer->is_valid(), "invariant");
     assert(_chunk != NULL, "invariant");
@@ -186,14 +173,67 @@
       assert(HEADER_SIZE == offset, "invariant");
       initialize();
     } else {
-      _writer->seek(GENERATION_OFFSET);
-      write_guard();
-      _writer->seek(offset);
+      if (head) {
+        _writer->seek(GENERATION_OFFSET);
+        write_guard();
+        _writer->seek(offset);
+      }
     }
-    assert(offset == _writer->current_offset(), "invariant");
+    DEBUG_ONLY(assert_writer_position(_writer, offset);)
   }
 };
 
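+// Writes the header of a checkpoint event whose single constant pool entry is
+// the chunk header itself (TYPE_CHUNKHEADER), serialized as a HEADER_SIZE byte
+// array. The u4 size field reserved here is patched by the caller once the
+// full event size is known.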
+static void write_checkpoint_header(JfrChunkWriter& cw, int64_t event_offset, bool flushpoint) {
+  const int64_t delta = cw.last_checkpoint_offset() == 0 ? 0 : cw.last_checkpoint_offset() - event_offset;
+  cw.reserve(sizeof(u4));
+  cw.write<u8>(EVENT_CHECKPOINT);
+  cw.write<u8>(JfrTicks::now().value());
+  cw.write<u8>(0); // duration
+  cw.write<u8>(delta); // to previous checkpoint
+  cw.write<bool>(flushpoint);
+  cw.write<u4>(1); // pool count
+  cw.write<u8>(TYPE_CHUNKHEADER);
+  cw.write<u4>(1); // count
+  cw.write<u8>(1); // key
+  cw.write<u4>(HEADER_SIZE); // length of byte array
+}
+
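+// Embeds a fresh copy of the chunk header as event content, so a consumer of
+// the stream can pick up updated header state (size, last checkpoint offset,
+// next generation) at a flushpoint without re-reading the file header.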
+int64_t JfrChunkWriter::write_chunk_header_checkpoint(bool flushpoint) {
+  assert(this->has_valid_fd(), "invariant");
+  const int64_t event_size_offset = current_offset();
+  write_checkpoint_header(*this, event_size_offset, flushpoint);
+  const int64_t start_offset = current_offset();
+  JfrChunkHeadWriter head(this, start_offset, false);
+  head.write_magic();
+  head.write_version();
+  const int64_t size_offset = reserve(sizeof(int64_t));
+  be_write(event_size_offset); // last checkpoint offset will be this checkpoint
+  head.write_metadata();
+  head.write_time(false);
+  head.write_cpu_frequency();
+  head.write_next_generation();
+  head.write_capabilities();
+  assert(current_offset() - start_offset == HEADER_SIZE, "invariant");
+  const u4 checkpoint_size = current_offset() - event_size_offset;
+  write_padded_at_offset<u4>(checkpoint_size, event_size_offset);
+  set_last_checkpoint_offset(event_size_offset);
+  const size_t sz_written = size_written();
+  write_be_at_offset(sz_written, size_offset);
+  return sz_written;
+}
+
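+// flushpoint == true emits a flush and keeps the chunk open; flushpoint ==
+// false finalizes the header, which is why close() calls flushpoint(false).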
+int64_t JfrChunkWriter::flushpoint(bool flushpoint) {
+  assert(_chunk != NULL, "invariant");
+  if (flushpoint) {
+    _chunk->update();
+  }
+  const int64_t sz_written = write_chunk_header_checkpoint(flushpoint);
+  assert(size_written() == sz_written, "invariant");
+  JfrChunkHeadWriter head(this, SIZE_OFFSET);
+  head.flush(sz_written, !flushpoint);
+  return sz_written;
+}
+
 JfrChunkWriter::JfrChunkWriter() : JfrChunkWriterBase(NULL), _chunk(new JfrChunk()) {}
 
 JfrChunkWriter::~JfrChunkWriter() {
@@ -211,17 +251,6 @@
   _chunk->update_time_to_now();
 }
 
-int64_t JfrChunkWriter::flushpoint(bool finalize) {
-  assert(_chunk != NULL, "invariant");
-  const int64_t sz_written = size_written();
-  if (!finalize) {
-    _chunk->update();
-  }
-  JfrChunkHeadWriter head(this, SIZE_OFFSET);
-  head.flush(sz_written, finalize);
-  return sz_written;
-}
-
 int64_t JfrChunkWriter::size_written() const {
   return this->is_valid() ? this->current_offset() : 0;
 }
@@ -272,7 +301,7 @@
 
 int64_t JfrChunkWriter::close() {
   assert(this->has_valid_fd(), "invariant");
-  const int64_t size_written = flushpoint(true);
+  const int64_t size_written = flushpoint(false);
   this->close_fd();
   assert(!this->is_valid(), "invariant");
   return size_written;
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -46,6 +46,7 @@
   bool open();
   int64_t close();
   int64_t current_chunk_start_nanos() const;
+  int64_t write_chunk_header_checkpoint(bool flushpoint);
 
  public:
   JfrChunkWriter();
--- a/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -175,5 +175,5 @@
 }
 
 size_t JfrRepository::flush_chunk() {
-  return _chunkwriter->flushpoint(false);
+  return _chunkwriter->flushpoint(true);
 }
--- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,7 +25,9 @@
 #include "precompiled.hpp"
 #include "jfrfiles/jfrEventClasses.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
@@ -273,16 +275,15 @@
 
 template <typename Functor>
 static void write_flush_event(Functor& f) {
-  if (!Functor::is_event_enabled()) {
-    return;
+  if (Functor::is_event_enabled()) {
+    typename Functor::EventType e(UNTIMED);
+    e.set_starttime(f.start_time());
+    e.set_endtime(f.end_time());
+    e.set_flushId(flushpoint_id);
+    e.set_elements(f.elements());
+    e.set_size(f.size());
+    e.commit();
   }
-  typename Functor::EventType e(UNTIMED);
-  e.set_starttime(f.start_time());
-  e.set_endtime(f.end_time());
-  e.set_flushId(flushpoint_id);
-  e.set_elements(f.elements());
-  e.set_size(f.size());
-  e.commit();
 }
 
 template <typename Functor>
@@ -434,18 +435,16 @@
     vm_error = true;
     prepare_for_vm_error_rotation();
   }
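+  // Perform the rotation before acting on a pending stop request, so buffered
+  // data is written to its destination first.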
+  if (!_storage.control().to_disk()) {
+    in_memory_rotation();
+  } else if (vm_error) {
+    vm_error_rotation();
+  } else {
+    chunk_rotation();
+  }
   if (msgs & (MSGBIT(MSG_STOP))) {
     stop();
   }
-  if (!_storage.control().to_disk()) {
-    in_memory_rotation();
-    return;
-  }
-  if (vm_error) {
-    vm_error_rotation();
-    return;
-  }
-  chunk_rotation();
 }
 
 void JfrRecorderService::prepare_for_vm_error_rotation() {
@@ -630,25 +629,29 @@
   Flush fl(_chunkwriter, flushpoint);
   invoke_with_flush_event(fl);
   write_thread_local_buffer(_chunkwriter);
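+  // Flush outstanding checkpoint data before the chunk flushpoint is written.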
+  _checkpoint_manager.flush();
   _repository.flush_chunk();
 }
 
 //
 // pre-safepoint write sequence
 //
-//  write checkpoint epoch transition list->
-//    write stack trace checkpoint ->
-//      write string pool checkpoint ->
-//        notify about pending rotation ->
-//          write storage
+//  write stack trace checkpoint ->
+//    write string pool checkpoint ->
+//      notify about pending rotation ->
+//        write storage
 //
 void JfrRecorderService::pre_safepoint_write() {
   assert(_chunkwriter.is_valid(), "invariant");
-  _checkpoint_manager.write_epoch_transition_mspace();
   flush_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, false);
   if (_string_pool.modified()) {
     flush_stringpool_checkpoint(_string_pool, _chunkwriter);
   }
+  if (LeakProfiler::is_running()) {
+    // Exclusive access to the object sampler instance.
+    // The sampler is released (unlocked) later in post_safepoint_write.
+    ObjectSampleCheckpoint::rotate(ObjectSampler::acquire(), _stack_trace_repository);
+  }
   _checkpoint_manager.notify_types_on_rotation();
   _storage.write();
 }
@@ -658,11 +661,6 @@
   VMThread::execute(&safepoint_task);
 }
 
-static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_repository) {
-  WriteObjectSampleStacktrace object_sample_stacktrace(stack_trace_repository);
-  object_sample_stacktrace.process();
-}
-
 //
 // safepoint write sequence
 //
@@ -676,11 +674,14 @@
 //
 void JfrRecorderService::safepoint_write() {
   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  write_object_sample_stacktrace(_stack_trace_repository);
+
   flush_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, true);
   if (_string_pool.modified()) {
     flush_stringpool_checkpoint_safepoint(_string_pool, _chunkwriter);
   }
+  if (LeakProfiler::is_running()) {
+    ObjectSampleCheckpoint::resolve_sampled_objects();
+  }
   _storage.write_at_safepoint();
   _checkpoint_manager.notify_threads();
   _checkpoint_manager.shift_epoch();
@@ -702,6 +703,11 @@
   // already tagged artifacts for the previous epoch. We can accomplish this concurrently
   // with threads now tagging artifacts in relation to the new, now updated, epoch and remain outside of a safepoint.
   _checkpoint_manager.write_type_set();
+  if (LeakProfiler::is_running()) {
+    // The object sampler instance was exclusively acquired and locked in pre_safepoint_write.
+    // Note: There is a dependency on write_type_set() above, ensure the release is subsequent.
+    ObjectSampler::release();
+  }
   // serialize any outstanding checkpoint memory
   _checkpoint_manager.write();
   // serialize the metadata descriptor event and close out the chunk
@@ -723,7 +729,6 @@
   // Do not attempt safepoint dependent operations during emergency dump.
   // Optimistically write tagged artifacts.
   _checkpoint_manager.shift_epoch();
-  _checkpoint_manager.write_type_set();
   // update time
   _chunkwriter.time_stamp_chunk_now();
   post_safepoint_write();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/vframe.inline.hpp"
+
+static void copy_frames(JfrStackFrame** lhs_frames, u4 length, const JfrStackFrame* rhs_frames) {
+  assert(lhs_frames != NULL, "invariant");
+  assert(rhs_frames != NULL, "invariant");
+  if (length > 0) {
+    *lhs_frames = NEW_C_HEAP_ARRAY(JfrStackFrame, length, mtTracing);
+    memcpy(*lhs_frames, rhs_frames, length * sizeof(JfrStackFrame));
+  }
+}
+
+JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, InstanceKlass* k) :
+  _klass(k), _methodid(id), _line(0), _bci(bci), _type(type) {}
+
+JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno) :
+  _klass(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
+
+JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
+  _next(NULL),
+  _frames(frames),
+  _id(0),
+  _hash(0),
+  _nr_of_frames(0),
+  _max_frames(max_frames),
+  _frames_ownership(false),
+  _reached_root(false),
+  _lineno(false),
+  _written(false) {}
+
+JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next) :
+  _next(next),
+  _frames(NULL),
+  _id(id),
+  _hash(trace._hash),
+  _nr_of_frames(trace._nr_of_frames),
+  _max_frames(trace._max_frames),
+  _frames_ownership(true),
+  _reached_root(trace._reached_root),
+  _lineno(trace._lineno),
+  _written(false) {
+  copy_frames(&_frames, trace._nr_of_frames, trace._frames);
+}
+
+JfrStackTrace::~JfrStackTrace() {
+  if (_frames_ownership && _frames != NULL) {
+    FREE_C_HEAP_ARRAY(JfrStackFrame, _frames);
+  }
+}
+
+void JfrStackTrace::operator=(const JfrStackTrace& trace) {
+  assert(_next == NULL, "invariant");
+  assert(_frames_ownership, "invariant");
+
+  if (_id == trace._id) {
+    assert(_hash == trace._hash, "invariant");
+    assert(_nr_of_frames == trace._nr_of_frames, "invariant");
+    return;
+  }
+  _next = NULL;
+  _id = trace._id;
+  _hash = trace._hash;
+  _nr_of_frames = trace._nr_of_frames;
+  _max_frames = trace._max_frames;
+  _reached_root = trace._reached_root;
+  _lineno = trace._lineno;
+  _written = false;
+  copy_frames(&_frames, trace._nr_of_frames, trace._frames);
+}
+
+template <typename Writer>
+static void write_stacktrace(Writer& w, traceid id, bool reached_root, u4 nr_of_frames, const JfrStackFrame* frames) {
+  w.write((u8)id);
+  w.write((u1)!reached_root);
+  w.write(nr_of_frames);
+  for (u4 i = 0; i < nr_of_frames; ++i) {
+    frames[i].write(w);
+  }
+}
+
+void JfrStackTrace::write(JfrChunkWriter& sw) const {
+  assert(!_written, "invariant");
+  write_stacktrace(sw, _id, _reached_root, _nr_of_frames, _frames);
+  _written = true;
+}
+
+void JfrStackTrace::write(JfrCheckpointWriter& cpw) const {
+  write_stacktrace(cpw, _id, _reached_root, _nr_of_frames, _frames);
+}
+
+bool JfrStackFrame::equals(const JfrStackFrame& rhs) const {
+  return _methodid == rhs._methodid && _bci == rhs._bci && _type == rhs._type;
+}
+
+bool JfrStackTrace::equals(const JfrStackTrace& rhs) const {
+  if (_reached_root != rhs._reached_root || _nr_of_frames != rhs._nr_of_frames || _hash != rhs._hash) {
+    return false;
+  }
+  for (u4 i = 0; i < _nr_of_frames; ++i) {
+    if (!_frames[i].equals(rhs._frames[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+template <typename Writer>
+static void write_frame(Writer& w, traceid methodid, int line, int bci, u1 type) {
+  w.write((u8)methodid);
+  w.write((u4)line);
+  w.write((u4)bci);
+  w.write((u8)type);
+}
+
+void JfrStackFrame::write(JfrChunkWriter& cw) const {
+  write_frame(cw, _methodid, _line, _bci, _type);
+}
+
+void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
+  write_frame(cpw, _methodid, _line, _bci, _type);
+}
+
+class vframeStreamSamples : public vframeStreamCommon {
+ public:
+  // constructor that starts with sender of frame fr (top_frame)
+  vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
+    _stop_at_java_call_stub = stop_at_java_call_stub;
+    _frame = fr;
+
+    // We must always have a valid frame to start filling
+    bool filled_in = fill_from_frame();
+    assert(filled_in, "invariant");
+  }
+  void samples_next();
+  void stop() {}
+};
+
+// Solaris SPARC Compiler1 needs an additional check on the grandparent
+// of the top_frame when the parent of the top_frame is interpreted and
+// the grandparent is compiled. However, in this method we do not know
+// the relationship of the current _frame relative to the top_frame, so
+// we implement a broader sanity check. When the previous callee is
+// interpreted and the current sender is compiled, we verify that the
+// current sender is also walkable. If it is not walkable, then we mark
+// the current vframeStream as at the end.
+void vframeStreamSamples::samples_next() {
+  // handle frames with inlining
+  if (_mode == compiled_mode &&
+    vframeStreamCommon::fill_in_compiled_inlined_sender()) {
+    return;
+  }
+
+  // handle general case
+  u4 loop_count = 0;
+  u4 loop_max = MAX_STACK_DEPTH * 2;
+  do {
+    loop_count++;
+    // By the time we get here we should never see an unsafe frame, but better safe than segv'd
+    if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) {
+      _mode = at_end_mode;
+      return;
+    }
+    _frame = _frame.sender(&_reg_map);
+  } while (!fill_from_frame());
+}
+
+bool JfrStackTrace::record_thread(JavaThread& thread, frame& frame) {
+  vframeStreamSamples st(&thread, frame, false);
+  u4 count = 0;
+  _reached_root = true;
+
+  while (!st.at_end()) {
+    if (count >= _max_frames) {
+      _reached_root = false;
+      break;
+    }
+    const Method* method = st.method();
+    if (!Method::is_valid_method(method)) {
+      // we throw away everything we've gathered in this sample since
+      // none of it is safe
+      return false;
+    }
+    const traceid mid = JfrTraceId::use(method);
+    int type = st.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
+    int bci = 0;
+    if (method->is_native()) {
+      type = JfrStackFrame::FRAME_NATIVE;
+    } else {
+      bci = st.bci();
+    }
+    const int lineno = method->line_number_from_bci(bci);
+    // Can we determine if it's inlined?
+    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
+    _frames[count] = JfrStackFrame(mid, bci, type, method->constants()->pool_holder());
+    st.samples_next();
+    count++;
+  }
+
+  _lineno = true;
+  _nr_of_frames = count;
+  return true;
+}
+
+void JfrStackFrame::resolve_lineno() const {
+  assert(_klass, "no klass pointer");
+  assert(_line == 0, "already have linenumber");
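+  // The low TRACE_ID_SHIFT bits of a method id encode the original method
+  // idnum (see METHOD_ID in jfrTraceIdMacros.hpp).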
+  const int id_num = _methodid & METHOD_ID_NUM_MASK;
+  const Method* const method = _klass->method_with_orig_idnum(id_num);
+  assert(method != NULL, "invariant");
+  _line = method->line_number_from_bci(_bci);
+}
+
+void JfrStackTrace::resolve_linenos() const {
+  for (unsigned int i = 0; i < _nr_of_frames; i++) {
+    _frames[i].resolve_lineno();
+  }
+  _lineno = true;
+}
+
+bool JfrStackTrace::record_safe(JavaThread* thread, int skip) {
+  assert(thread == Thread::current(), "Thread stack needs to be walkable");
+  vframeStream vfs(thread);
+  u4 count = 0;
+  _reached_root = true;
+  for (int i = 0; i < skip; i++) {
+    if (vfs.at_end()) {
+      break;
+    }
+    vfs.next();
+  }
+
+  while (!vfs.at_end()) {
+    if (count >= _max_frames) {
+      _reached_root = false;
+      break;
+    }
+    const Method* method = vfs.method();
+    const traceid mid = JfrTraceId::use(method);
+    int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
+    int bci = 0;
+    if (method->is_native()) {
+      type = JfrStackFrame::FRAME_NATIVE;
+    } else {
+      bci = vfs.bci();
+    }
+    // Can we determine if it's inlined?
+    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
+    _frames[count] = JfrStackFrame(mid, bci, type, method->constants()->pool_holder());
+    vfs.next();
+    count++;
+  }
+
+  _nr_of_frames = count;
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
+#define SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+
+class frame;
+class InstanceKlass;
+class JavaThread;
+class JfrCheckpointWriter;
+class JfrChunkWriter;
+class Method;
+
+class JfrStackFrame {
+  friend class ObjectSampleCheckpoint;
+ private:
+  mutable InstanceKlass* _klass;
+  traceid _methodid;
+  mutable int _line;
+  int _bci;
+  u1 _type;
+
+ public:
+  JfrStackFrame(const traceid& id, int bci, int type, InstanceKlass* klass);
+  JfrStackFrame(const traceid& id, int bci, int type, int lineno);
+
+  bool equals(const JfrStackFrame& rhs) const;
+  void write(JfrChunkWriter& cw) const;
+  void write(JfrCheckpointWriter& cpw) const;
+  void resolve_lineno() const;
+
+  enum {
+    FRAME_INTERPRETER = 0,
+    FRAME_JIT,
+    FRAME_INLINE,
+    FRAME_NATIVE,
+    NUM_FRAME_TYPES
+  };
+};
+
+class JfrStackTrace : public JfrCHeapObj {
+  friend class JfrNativeSamplerCallback;
+  friend class JfrStackTraceRepository;
+  friend class ObjectSampleCheckpoint;
+  friend class ObjectSampler;
+  friend class OSThreadSampler;
+  friend class ProcessStackTrace;
+  friend class StackTraceInstall;
+  friend class StackTraceWrite;
+
+ private:
+  const JfrStackTrace* _next;
+  JfrStackFrame* _frames;
+  traceid _id;
+  unsigned int _hash;
+  u4 _nr_of_frames;
+  u4 _max_frames;
+  bool _frames_ownership;
+  bool _reached_root;
+  mutable bool _lineno;
+  mutable bool _written;
+
+  const JfrStackTrace* next() const { return _next; }
+
+  bool should_write() const { return !_written; }
+  void write(JfrChunkWriter& cw) const;
+  void write(JfrCheckpointWriter& cpw) const;
+  bool equals(const JfrStackTrace& rhs) const;
+
+  void set_id(traceid id) { _id = id; }
+  void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
+  void set_hash(unsigned int hash) { _hash = hash; }
+  void set_reached_root(bool reached_root) { _reached_root = reached_root; }
+  void resolve_linenos() const;
+
+  bool record_thread(JavaThread& thread, frame& frame);
+  bool record_safe(JavaThread* thread, int skip);
+
+  bool have_lineno() const { return _lineno; }
+  bool full_stacktrace() const { return _reached_root; }
+
+  JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next);
+  void operator=(const JfrStackTrace& trace);
+
+ public:
+  JfrStackTrace(JfrStackFrame* frames, u4 max_frames);
+  ~JfrStackTrace();
+  unsigned int hash() const { return _hash; }
+  traceid id() const { return _id; }
+  u4 number_of_frames() const { return _nr_of_frames; }
+};
+
+#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -24,66 +24,17 @@
 
 #include "precompiled.hpp"
 #include "jfr/metadata/jfrSerializer.hpp"
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
-#include "memory/allocation.inline.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/task.hpp"
-#include "runtime/vframe.inline.hpp"
-
-class vframeStreamSamples : public vframeStreamCommon {
- public:
-  // constructor that starts with sender of frame fr (top_frame)
-  vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub);
-  void samples_next();
-  void stop() {}
-};
-
-vframeStreamSamples::vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
-  _stop_at_java_call_stub = stop_at_java_call_stub;
-  _frame = fr;
-
-  // We must always have a valid frame to start filling
-  bool filled_in = fill_from_frame();
-  assert(filled_in, "invariant");
-}
-
-// Solaris SPARC Compiler1 needs an additional check on the grandparent
-// of the top_frame when the parent of the top_frame is interpreted and
-// the grandparent is compiled. However, in this method we do not know
-// the relationship of the current _frame relative to the top_frame so
-// we implement a more broad sanity check. When the previous callee is
-// interpreted and the current sender is compiled, we verify that the
-// current sender is also walkable. If it is not walkable, then we mark
-// the current vframeStream as at the end.
-void vframeStreamSamples::samples_next() {
-  // handle frames with inlining
-  if (_mode == compiled_mode &&
-      vframeStreamCommon::fill_in_compiled_inlined_sender()) {
-    return;
-  }
-
-  // handle general case
-  u4 loop_count = 0;
-  u4 loop_max = MAX_STACK_DEPTH * 2;
-  do {
-    loop_count++;
-    // By the time we get here we should never see unsafe but better safe then segv'd
-    if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) {
-      _mode = at_end_mode;
-      return;
-    }
-    _frame = _frame.sender(&_reg_map);
-  } while (!fill_from_frame());
-}
 
 static JfrStackTraceRepository* _instance = NULL;
 
+JfrStackTraceRepository::JfrStackTraceRepository() : _next_id(0), _entries(0) {
+  memset(_table, 0, sizeof(_table));
+}
+
 JfrStackTraceRepository& JfrStackTraceRepository::instance() {
   return *_instance;
 }
@@ -94,15 +45,6 @@
   return _instance;
 }
 
-void JfrStackTraceRepository::destroy() {
-  assert(_instance != NULL, "invarinat");
-  delete _instance;
-  _instance = NULL;
-}
-
-JfrStackTraceRepository::JfrStackTraceRepository() : _next_id(0), _entries(0) {
-  memset(_table, 0, sizeof(_table));
-}
 class JfrFrameType : public JfrSerializer {
  public:
   void serialize(JfrCheckpointWriter& writer) {
@@ -122,119 +64,10 @@
   return JfrSerializer::register_serializer(TYPE_FRAMETYPE, true, new JfrFrameType());
 }
 
-size_t JfrStackTraceRepository::clear() {
-  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
-  if (_entries == 0) {
-    return 0;
-  }
-  for (u4 i = 0; i < TABLE_SIZE; ++i) {
-    JfrStackTraceRepository::StackTrace* stacktrace = _table[i];
-    while (stacktrace != NULL) {
-      JfrStackTraceRepository::StackTrace* next = stacktrace->next();
-      delete stacktrace;
-      stacktrace = next;
-    }
-  }
-  memset(_table, 0, sizeof(_table));
-  const size_t processed = _entries;
-  _entries = 0;
-  return processed;
-}
-
-traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
-  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
-  const size_t index = stacktrace._hash % TABLE_SIZE;
-  const StackTrace* table_entry = _table[index];
-
-  while (table_entry != NULL) {
-    if (table_entry->equals(stacktrace)) {
-      return table_entry->id();
-    }
-    table_entry = table_entry->next();
-  }
-
-  if (!stacktrace.have_lineno()) {
-    return 0;
-  }
-
-  traceid id = ++_next_id;
-  _table[index] = new StackTrace(id, stacktrace, _table[index]);
-  ++_entries;
-  return id;
-}
-
-traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
-  return instance().add_trace(stacktrace);
-}
-
-traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
-  assert(thread == Thread::current(), "invariant");
-  JfrThreadLocal* const tl = thread->jfr_thread_local();
-  assert(tl != NULL, "invariant");
-  if (tl->has_cached_stack_trace()) {
-    return tl->cached_stack_trace_id();
-  }
-  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
-    return 0;
-  }
-  JfrStackFrame* frames = tl->stackframes();
-  if (frames == NULL) {
-    // pending oom
-    return 0;
-  }
-  assert(frames != NULL, "invariant");
-  assert(tl->stackframes() == frames, "invariant");
-  return instance().record_for((JavaThread*)thread, skip,frames, tl->stackdepth());
-}
-
-traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) {
-  assert(thread == Thread::current(), "invariant");
-  JfrThreadLocal* const tl = thread->jfr_thread_local();
-  assert(tl != NULL, "invariant");
-
-  if (tl->has_cached_stack_trace()) {
-    *hash = tl->cached_stack_trace_hash();
-    return tl->cached_stack_trace_id();
-  }
-  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view() || tl->is_excluded()) {
-    return 0;
-  }
-  JfrStackFrame* frames = tl->stackframes();
-  if (frames == NULL) {
-    // pending oom
-    return 0;
-  }
-  assert(frames != NULL, "invariant");
-  assert(tl->stackframes() == frames, "invariant");
-  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth(), hash);
-}
-
-traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
-  JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip)) {
-    return 0;
-  }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  return tid;
-}
-
-traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) {
-  assert(hash != NULL && *hash == 0, "invariant");
-  JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip, true)) {
-    return 0;
-  }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  *hash = stacktrace._hash;
-  return tid;
+void JfrStackTraceRepository::destroy() {
+  assert(_instance != NULL, "invariant");
+  delete _instance;
+  _instance = NULL;
 }
 
 size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
@@ -242,9 +75,9 @@
   assert(_entries > 0, "invariant");
   int count = 0;
   for (u4 i = 0; i < TABLE_SIZE; ++i) {
-    JfrStackTraceRepository::StackTrace* stacktrace = _table[i];
+    JfrStackTrace* stacktrace = _table[i];
     while (stacktrace != NULL) {
-      JfrStackTraceRepository::StackTrace* next = stacktrace->next();
+      JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
       if (stacktrace->should_write()) {
         stacktrace->write(sw);
         ++count;
@@ -268,7 +101,7 @@
 
 traceid JfrStackTraceRepository::write(JfrCheckpointWriter& writer, traceid id, unsigned int hash) {
   assert(JfrStacktrace_lock->owned_by_self(), "invariant");
-  const StackTrace* const trace = resolve_entry(hash, id);
+  const JfrStackTrace* const trace = lookup(hash, id);
   assert(trace != NULL, "invariant");
   assert(trace->hash() == hash, "invariant");
   assert(trace->id() == id, "invariant");
@@ -276,84 +109,99 @@
   return id;
 }
 
-JfrStackTraceRepository::StackTrace::StackTrace(traceid id, const JfrStackTrace& trace, JfrStackTraceRepository::StackTrace* next) :
-  _next(next),
-  _frames(NULL),
-  _id(id),
-  _nr_of_frames(trace._nr_of_frames),
-  _hash(trace._hash),
-  _reached_root(trace._reached_root),
-  _written(false) {
-  if (_nr_of_frames > 0) {
-    _frames = NEW_C_HEAP_ARRAY(JfrStackFrame, _nr_of_frames, mtTracing);
-    memcpy(_frames, trace._frames, _nr_of_frames * sizeof(JfrStackFrame));
-  }
+void JfrStackTraceRepository::write_metadata(JfrCheckpointWriter& writer) {
+  JfrFrameType fct;
+  writer.write_type(TYPE_FRAMETYPE);
+  fct.serialize(writer);
 }
 
-JfrStackTraceRepository::StackTrace::~StackTrace() {
-  if (_frames != NULL) {
-    FREE_C_HEAP_ARRAY(JfrStackFrame, _frames);
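+// Deletes every chained JfrStackTrace in the table under the repository lock
+// and returns the number of entries that were removed.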
+size_t JfrStackTraceRepository::clear() {
+  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+  if (_entries == 0) {
+    return 0;
   }
-}
-
-bool JfrStackTraceRepository::StackTrace::equals(const JfrStackTrace& rhs) const {
-  if (_reached_root != rhs._reached_root || _nr_of_frames != rhs._nr_of_frames || _hash != rhs._hash) {
-    return false;
-  }
-  for (u4 i = 0; i < _nr_of_frames; ++i) {
-    if (!_frames[i].equals(rhs._frames[i])) {
-      return false;
+  for (u4 i = 0; i < TABLE_SIZE; ++i) {
+    JfrStackTrace* stacktrace = _table[i];
+    while (stacktrace != NULL) {
+      JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
+      delete stacktrace;
+      stacktrace = next;
     }
   }
-  return true;
+  memset(_table, 0, sizeof(_table));
+  const size_t processed = _entries;
+  _entries = 0;
+  return processed;
 }
 
-template <typename Writer>
-static void write_stacktrace(Writer& w, traceid id, bool reached_root, u4 nr_of_frames, const JfrStackFrame* frames) {
-  w.write((u8)id);
-  w.write((u1)!reached_root);
-  w.write(nr_of_frames);
-  for (u4 i = 0; i < nr_of_frames; ++i) {
-    frames[i].write(w);
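+// Records a stack trace for the current thread, preferring an id already
+// cached in the JfrThreadLocal. Returns 0 for threads that cannot be walked
+// (non-Java, hidden, excluded) or when the frame buffer is unavailable.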
+traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
+  assert(thread == Thread::current(), "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (tl->has_cached_stack_trace()) {
+    return tl->cached_stack_trace_id();
   }
+  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view() || tl->is_excluded()) {
+    return 0;
+  }
+  JfrStackFrame* frames = tl->stackframes();
+  if (frames == NULL) {
+    // pending oom
+    return 0;
+  }
+  assert(frames != NULL, "invariant");
+  assert(tl->stackframes() == frames, "invariant");
+  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth());
 }
 
-void JfrStackTraceRepository::StackTrace::write(JfrChunkWriter& sw) const {
-  assert(!_written, "invariant");
-  write_stacktrace(sw, _id, _reached_root, _nr_of_frames, _frames);
-  _written = true;
-}
-
-void JfrStackTraceRepository::StackTrace::write(JfrCheckpointWriter& cpw) const {
-  write_stacktrace(cpw, _id, _reached_root, _nr_of_frames, _frames);
+traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
+  JfrStackTrace stacktrace(frames, max_frames);
+  return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
 }
 
-// JfrStackFrame
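+// Variant taking an explicit thread; the stacktrace must carry a precomputed
+// hash and is forwarded to add(const JfrStackTrace&).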
+traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  return add(*stacktrace);
+}
 
-bool JfrStackFrame::equals(const JfrStackFrame& rhs) const {
-  return _methodid == rhs._methodid && _bci == rhs._bci && _type == rhs._type;
+traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
+  traceid tid = instance().add_trace(stacktrace);
+  if (tid == 0) {
+    stacktrace.resolve_linenos();
+    tid = instance().add_trace(stacktrace);
+  }
+  assert(tid != 0, "invariant");
+  return tid;
 }
 
-template <typename Writer>
-static void write_frame(Writer& w, traceid methodid, int line, int bci, u1 type) {
-  w.write((u8)methodid);
-  w.write((u4)line);
-  w.write((u4)bci);
-  w.write((u8)type);
-}
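+// Inserts into the hash table bucket chain keyed by the stacktrace hash.
+// Returns the id of an existing equal entry, 0 if line numbers have not yet
+// been resolved, or a freshly assigned id for the new entry.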
+traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
+  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+  const size_t index = stacktrace._hash % TABLE_SIZE;
+  const JfrStackTrace* table_entry = _table[index];
 
-void JfrStackFrame::write(JfrChunkWriter& cw) const {
-  write_frame(cw, _methodid, _line, _bci, _type);
-}
+  while (table_entry != NULL) {
+    if (table_entry->equals(stacktrace)) {
+      return table_entry->id();
+    }
+    table_entry = table_entry->next();
+  }
 
-void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
-  write_frame(cpw, _methodid, _line, _bci, _type);
+  if (!stacktrace.have_lineno()) {
+    return 0;
+  }
+
+  traceid id = ++_next_id;
+  _table[index] = new JfrStackTrace(id, stacktrace, _table[index]);
+  ++_entries;
+  return id;
 }
 
 // invariant is that the entry to be resolved actually exists in the table
-const JfrStackTraceRepository::StackTrace* JfrStackTraceRepository::resolve_entry(unsigned int hash, traceid id) const {
+const JfrStackTrace* JfrStackTraceRepository::lookup(unsigned int hash, traceid id) const {
   const size_t index = (hash % TABLE_SIZE);
-  const StackTrace* trace = _table[index];
+  const JfrStackTrace* trace = _table[index];
   while (trace != NULL && trace->id() != id) {
     trace = trace->next();
   }
@@ -363,100 +211,16 @@
   return trace;
 }
 
-void JfrStackFrame::resolve_lineno() {
-  assert(_method, "no method pointer");
-  assert(_line == 0, "already have linenumber");
-  _line = _method->line_number_from_bci(_bci);
-}
-
-void JfrStackTrace::set_frame(u4 frame_pos, JfrStackFrame& frame) {
-  assert(frame_pos < _max_frames, "illegal frame_pos");
-  _frames[frame_pos] = frame;
-}
-
-void JfrStackTrace::resolve_linenos() {
-  for(unsigned int i = 0; i < _nr_of_frames; i++) {
-    _frames[i].resolve_lineno();
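+// Fills in a caller-provided JfrStackTrace for the current thread, reusing a
+// hash already cached in the JfrThreadLocal when one is available.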
+bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip) {
+  assert(thread == Thread::current(), "invariant");
+  assert(stacktrace != NULL, "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
+  if (cached_stacktrace_hash != 0) {
+    stacktrace->set_hash(cached_stacktrace_hash);
+    return true;
   }
-  _lineno = true;
+  return stacktrace->record_safe(thread, skip);
 }
 
-bool JfrStackTrace::record_safe(JavaThread* thread, int skip, bool leakp /* false */) {
-  assert(thread == Thread::current(), "Thread stack needs to be walkable");
-  vframeStream vfs(thread);
-  u4 count = 0;
-  _reached_root = true;
-  for(int i = 0; i < skip; i++) {
-    if (vfs.at_end()) {
-      break;
-    }
-    vfs.next();
-  }
-
-  while (!vfs.at_end()) {
-    if (count >= _max_frames) {
-      _reached_root = false;
-      break;
-    }
-    const Method* method = vfs.method();
-    const traceid mid = JfrTraceId::use(method, leakp);
-    int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
-    int bci = 0;
-    if (method->is_native()) {
-      type = JfrStackFrame::FRAME_NATIVE;
-    } else {
-      bci = vfs.bci();
-    }
-    // Can we determine if it's inlined?
-    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
-    _frames[count] = JfrStackFrame(mid, bci, type, method);
-    vfs.next();
-    count++;
-  }
-
-  _nr_of_frames = count;
-  return true;
-}
-
-bool JfrStackTrace::record_thread(JavaThread& thread, frame& frame) {
-  vframeStreamSamples st(&thread, frame, false);
-  u4 count = 0;
-  _reached_root = true;
-
-  while (!st.at_end()) {
-    if (count >= _max_frames) {
-      _reached_root = false;
-      break;
-    }
-    const Method* method = st.method();
-    if (!Method::is_valid_method(method)) {
-      // we throw away everything we've gathered in this sample since
-      // none of it is safe
-      return false;
-    }
-    const traceid mid = JfrTraceId::use(method);
-    int type = st.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
-    int bci = 0;
-    if (method->is_native()) {
-      type = JfrStackFrame::FRAME_NATIVE;
-    } else {
-      bci = st.bci();
-    }
-    const int lineno = method->line_number_from_bci(bci);
-    // Can we determine if it's inlined?
-    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
-    _frames[count] = JfrStackFrame(method, mid, bci, type, lineno);
-    st.samples_next();
-    count++;
-  }
-
-  _lineno = true;
-  _nr_of_frames = count;
-  return true;
-}
-
-void JfrStackTraceRepository::write_metadata(JfrCheckpointWriter& writer) {
-  JfrFrameType fct;
-  writer.write_type(TYPE_FRAMETYPE);
-  fct.serialize(writer);
-}
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,129 +25,51 @@
 #ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
 #define SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
 
+#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
 #include "jfr/utilities/jfrAllocation.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 
-class frame;
 class JavaThread;
 class JfrCheckpointWriter;
 class JfrChunkWriter;
-class Method;
-
-class JfrStackFrame {
- private:
-  const Method* _method;
-  traceid _methodid;
-  int _line;
-  int _bci;
-  u1 _type;
-
- public:
-  enum {
-    FRAME_INTERPRETER = 0,
-    FRAME_JIT,
-    FRAME_INLINE,
-    FRAME_NATIVE,
-    NUM_FRAME_TYPES
-  };
-
-  JfrStackFrame(const traceid& id, int bci, int type, const Method* method) :
-    _method(method), _methodid(id), _line(0), _bci(bci), _type(type) {}
-  JfrStackFrame(const Method* method, const traceid& id, int bci, int type, int lineno) :
-    _method(method), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
-  bool equals(const JfrStackFrame& rhs) const;
-  void write(JfrChunkWriter& cw) const;
-  void write(JfrCheckpointWriter& cpw) const;
-  void resolve_lineno();
-};
-
-class JfrStackTrace : public StackObj {
-  friend class JfrStackTraceRepository;
- private:
-  JfrStackFrame* _frames;
-  traceid _id;
-  u4 _nr_of_frames;
-  unsigned int _hash;
-  const u4 _max_frames;
-  bool _reached_root;
-  bool _lineno;
-
- public:
-  JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
-                                                        _id(0),
-                                                        _nr_of_frames(0),
-                                                        _hash(0),
-                                                        _max_frames(max_frames),
-                                                        _reached_root(false),
-                                                        _lineno(false) {}
-  bool record_thread(JavaThread& thread, frame& frame);
-  bool record_safe(JavaThread* thread, int skip, bool leakp = false);
-  void resolve_linenos();
-  void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
-  void set_hash(unsigned int hash) { _hash = hash; }
-  void set_frame(u4 frame_pos, JfrStackFrame& frame);
-  void set_reached_root(bool reached_root) { _reached_root = reached_root; }
-  bool full_stacktrace() const { return _reached_root; }
-  bool have_lineno() const { return _lineno; }
-};
 
 class JfrStackTraceRepository : public JfrCHeapObj {
+  friend class FlushStackTraceRepository;
   friend class JfrRecorder;
   friend class JfrRecorderService;
+  friend class ObjectSampleCheckpoint;
   friend class ObjectSampler;
-  friend class WriteObjectSampleStacktrace;
-
-  class StackTrace : public JfrCHeapObj {
-    friend class JfrStackTrace;
-    friend class JfrStackTraceRepository;
-   private:
-    StackTrace* _next;
-    JfrStackFrame* _frames;
-    const traceid _id;
-    u4 _nr_of_frames;
-    unsigned int _hash;
-    bool _reached_root;
-    mutable bool _written;
-
-    unsigned int hash() const { return _hash; }
-    bool should_write() const { return !_written; }
-
-   public:
-    StackTrace(traceid id, const JfrStackTrace& trace, StackTrace* next);
-    ~StackTrace();
-    traceid id() const { return _id; }
-    StackTrace* next() const { return _next; }
-    void write(JfrChunkWriter& cw) const;
-    void write(JfrCheckpointWriter& cpw) const;
-    bool equals(const JfrStackTrace& rhs) const;
-  };
+  friend class StackTraceInstall;
+  friend class StackTraceWrite;
+  friend class WriteStackTraceRepository;
 
  private:
   static const u4 TABLE_SIZE = 2053;
-  StackTrace* _table[TABLE_SIZE];
+  JfrStackTrace* _table[TABLE_SIZE];
   traceid _next_id;
   u4 _entries;
 
-  size_t write_impl(JfrChunkWriter& cw, bool clear);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
-  traceid add_trace(const JfrStackTrace& stacktrace);
-  const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
-
-  static void write_metadata(JfrCheckpointWriter& cpw);
-
   JfrStackTraceRepository();
   static JfrStackTraceRepository& instance();
- public:
   static JfrStackTraceRepository* create();
   bool initialize();
   static void destroy();
-  static traceid add(const JfrStackTrace& stacktrace);
-  static traceid record(Thread* thread, int skip = 0);
-  static traceid record(Thread* thread, int skip, unsigned int* hash);
+
+  size_t write_impl(JfrChunkWriter& cw, bool clear);
+  static void write_metadata(JfrCheckpointWriter& cpw);
   traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
   size_t write(JfrChunkWriter& cw, bool clear);
   size_t clear();
+
+  traceid add_trace(const JfrStackTrace& stacktrace);
+  static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
+  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
+  const JfrStackTrace* lookup(unsigned int hash, traceid id) const;
+  static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip);
+
+ public:
+  static traceid add(const JfrStackTrace& stacktrace);
+  static traceid record(Thread* thread, int skip = 0);
 };
 
 #endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
--- a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -140,10 +140,6 @@
   OrderAccess::release_store(&_identity, (const void*)NULL);
 }
 
-void JfrBuffer::clear_identity() {
-  _identity = NULL;
-}
-
 bool JfrBuffer::acquired_by(const void* id) const {
   return identity() == id;
 }
--- a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -27,10 +27,6 @@
 #include "jfr/utilities/jfrAllocation.hpp"
 #include "jfr/utilities/jfrDoublyLinkedList.hpp"
 #include "jfr/utilities/jfrIterator.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
-#include "runtime/os.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
 
 template <typename T, template <typename> class RetrievalType, typename Callback>
 class JfrMemorySpace : public JfrCHeapObj {
@@ -107,62 +103,4 @@
   debug_only(bool in_free_list(const Type* t) const { return _free.in_list(t); })
 };
 
-// allocations are even multiples of the mspace min size
-inline u8 align_allocation_size(u8 requested_size, size_t min_elem_size) {
-  assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
-  u8 alloc_size_bytes = min_elem_size;
-  while (requested_size > alloc_size_bytes) {
-    alloc_size_bytes <<= 1;
-  }
-  assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
-  return alloc_size_bytes;
-}
-
-template <typename T, template <typename> class RetrievalType, typename Callback>
-T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) {
-  const u8 aligned_size_bytes = align_allocation_size(size, _min_elem_size);
-  void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T));
-  if (allocation == NULL) {
-    return NULL;
-  }
-  T* const t = new (allocation) T;
-  assert(t != NULL, "invariant");
-  if (!t->initialize(sizeof(T), aligned_size_bytes)) {
-    JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T));
-    return NULL;
-  }
-  return t;
-}
-
-template <typename T, template <typename> class RetrievalType, typename Callback>
-void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) {
-  assert(t != NULL, "invariant");
-  assert(!_free.in_list(t), "invariant");
-  assert(!_full.in_list(t), "invariant");
-  assert(t != NULL, "invariant");
-  JfrCHeapObj::free(t, t->total_size());
-}
-
-template <typename Mspace>
-class MspaceLock {
- private:
-  Mspace* _mspace;
- public:
-  MspaceLock(Mspace* mspace) : _mspace(mspace) { _mspace->lock(); }
-  ~MspaceLock() { _mspace->unlock(); }
-};
-
-template <typename Mspace>
-class ReleaseOp : public StackObj {
- private:
-  Mspace* _mspace;
-  Thread* _thread;
-  bool _release_full;
- public:
-  typedef typename Mspace::Type Type;
-  ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) : _mspace(mspace), _thread(thread), _release_full(release_full) {}
-  bool process(Type* t);
-  size_t processed() const { return 0; }
-};
-
 #endif // SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP
--- a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -26,6 +26,7 @@
 #define SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP
 
 #include "jfr/recorder/storage/jfrMemorySpace.hpp"
+#include "runtime/os.hpp"
 
 template <typename T, template <typename> class RetrievalType, typename Callback>
 JfrMemorySpace<T, RetrievalType, Callback>::
@@ -69,6 +70,42 @@
   return true;
 }
 
+// allocation sizes are power-of-two multiples of the mspace minimum element size
+static inline size_t align_allocation_size(size_t requested_size, size_t min_elem_size) {
+  assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
+  u8 alloc_size_bytes = min_elem_size;
+  while (requested_size > alloc_size_bytes) {
+    alloc_size_bytes <<= 1;
+  }
+  assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
+  return (size_t)alloc_size_bytes;
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+inline T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) {
+  const size_t aligned_size_bytes = align_allocation_size(size, _min_elem_size);
+  void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T));
+  if (allocation == NULL) {
+    return NULL;
+  }
+  T* const t = new (allocation) T;
+  assert(t != NULL, "invariant");
+  if (!t->initialize(sizeof(T), aligned_size_bytes)) {
+    JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T));
+    return NULL;
+  }
+  return t;
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+inline void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) {
+  assert(t != NULL, "invariant");
+  assert(!_free.in_list(t), "invariant");
+  assert(!_full.in_list(t), "invariant");
+  assert(t != NULL, "invariant");
+  JfrCHeapObj::free(t, t->total_size());
+}
+
 template <typename T, template <typename> class RetrievalType, typename Callback>
 inline void JfrMemorySpace<T, RetrievalType, Callback>::release_full(T* t) {
   assert(is_locked(), "invariant");
@@ -123,6 +160,15 @@
   }
 }
 
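+// Convenience factory that allocates an mspace and immediately runs
+// initialize() on it; the initialize() result is not checked here.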
+template <typename Mspace, typename Callback>
+static inline Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, Callback* cb) {
+  Mspace* const mspace = new Mspace(buffer_size, limit, cache_count, cb);
+  if (mspace != NULL) {
+    mspace->initialize();
+  }
+  return mspace;
+}
+
 template <typename Mspace>
 inline size_t size_adjustment(size_t size, Mspace* mspace) {
   assert(mspace != NULL, "invariant");
@@ -175,6 +221,15 @@
 }
 
 template <typename Mspace>
+class MspaceLock {
+ private:
+  Mspace* _mspace;
+ public:
+  MspaceLock(Mspace* mspace) : _mspace(mspace) { _mspace->lock(); }
+  ~MspaceLock() { _mspace->unlock(); }
+};
+
+template <typename Mspace>
 inline typename Mspace::Type* mspace_allocate_transient_to_full(size_t size, Mspace* mspace, Thread* thread) {
   typename Mspace::Type* const t = mspace_allocate_transient(size, mspace, thread);
   if (t == NULL) return NULL;
@@ -345,6 +400,20 @@
 }
 
 template <typename Mspace>
+class ReleaseOp : public StackObj {
+ private:
+  Mspace* _mspace;
+  Thread* _thread;
+  bool _release_full;
+ public:
+  typedef typename Mspace::Type Type;
+  ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) :
+    _mspace(mspace), _thread(thread), _release_full(release_full) {}
+  bool process(Type* t);
+  size_t processed() const { return 0; }
+};
+
+template <typename Mspace>
 inline bool ReleaseOp<Mspace>::process(typename Mspace::Type* t) {
   assert(t != NULL, "invariant");
   // assumes some means of exclusive access to t
--- a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -325,7 +325,11 @@
 
 static bool insert_full_age_node(JfrAgeNode* age_node, JfrStorageAgeMspace* age_mspace, Thread* thread) {
   assert(JfrBuffer_lock->owned_by_self(), "invariant");
+  assert(age_node != NULL, "invariant");
+  assert(age_node->acquired_by_self(), "invariant");
   assert(age_node->retired_buffer()->retired(), "invariant");
+  age_node->release(); // drop identity claim on age node when inserting to full list
+  assert(age_node->identity() == NULL, "invariant");
   age_mspace->insert_full_head(age_node);
   return true;
 }
@@ -342,8 +346,8 @@
       return false;
     }
   }
+  assert(age_node != NULL, "invariant");
   assert(age_node->acquired_by_self(), "invariant");
-  assert(age_node != NULL, "invariant");
   age_node->set_retired_buffer(buffer);
   control.increment_full();
   return insert_full_age_node(age_node, age_mspace, thread);
@@ -425,6 +429,7 @@
       if (oldest_age_node == NULL) {
         break;
       }
+      assert(oldest_age_node->identity() == NULL, "invariant");
       BufferPtr const buffer = oldest_age_node->retired_buffer();
       assert(buffer->retired(), "invariant");
       discarded_size += buffer->unflushed_size();
@@ -436,7 +441,7 @@
       } else {
         mspace_release_full(oldest_age_node, _age_mspace);
         buffer->reinitialize();
-        buffer->release(); // pusb
+        buffer->release(); // publish
         break;
       }
     }
@@ -654,12 +659,12 @@
   JfrAgeNode* last = NULL;
   while (node != NULL) {
     last = node;
+    assert(node->identity() == NULL, "invariant");
     BufferPtr const buffer = node->retired_buffer();
     assert(buffer != NULL, "invariant");
     assert(buffer->retired(), "invariant");
     processor.process(buffer);
     // at this point, buffer is already live or destroyed
-    node->clear_identity();
     JfrAgeNode* const next = (JfrAgeNode*)node->next();
     if (node->transient()) {
       // detach
@@ -704,9 +709,10 @@
 
 static void log(size_t count, size_t amount, bool clear = false) {
   if (log_is_enabled(Debug, jfr, system)) {
-    assert(count > 0, "invariant");
+    if (count > 0) {
       log_debug(jfr, system)("%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" B of data%s",
         clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
+    }
   }
 }
 
--- a/src/hotspot/share/jfr/support/jfrFlush.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/support/jfrFlush.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -62,7 +62,6 @@
 }
 
 void jfr_conditional_flush(JfrEventId id, size_t size, Thread* t) {
-  assert(jfr_is_event_enabled(id), "invariant");
   if (t->jfr_thread_local()->has_native_buffer()) {
     JfrStorage::Buffer* const buffer = t->jfr_thread_local()->native_buffer();
     if (LessThanSize<JfrStorage::Buffer>::evaluate(buffer, size)) {
--- a/src/hotspot/share/jfr/support/jfrFlush.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/support/jfrFlush.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -48,10 +48,12 @@
 
 template <typename Event>
 class JfrConditionalFlush {
+ protected:
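+  // Cached event enablement so subclasses can skip stack trace work for
+  // disabled events.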
+  bool _enabled;
  public:
   typedef JfrBuffer Type;
-  JfrConditionalFlush(Thread* t) {
-    if (jfr_is_event_enabled(Event::eventId)) {
+  JfrConditionalFlush(Thread* t) : _enabled(jfr_is_event_enabled(Event::eventId)) {
+    if (_enabled) {
       jfr_conditional_flush(Event::eventId, sizeof(Event), t);
     }
   }
@@ -63,7 +65,7 @@
   bool _owner;
  public:
   JfrConditionalFlushWithStacktrace(Thread* t) : JfrConditionalFlush<Event>(t), _t(t), _owner(false) {
-    if (Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
+    if (this->_enabled && Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
       _owner = jfr_save_stacktrace(t);
     }
   }
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,13 +25,13 @@
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/storage/jfrStorage.hpp"
-#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
 #include "jfr/support/jfrThreadLocal.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/os.hpp"
@@ -136,7 +136,9 @@
   assert(!tl->is_dead(), "invariant");
   if (JfrRecorder::is_recording()) {
     if (t->is_Java_thread() && !tl->is_excluded()) {
-      send_java_thread_end_events(tl->thread_id(), (JavaThread*)t);
+      JavaThread* const jt = (JavaThread*)t;
+      ObjectSampleCheckpoint::on_thread_exit(jt);
+      send_java_thread_end_events(tl->thread_id(), jt);
     }
   }
   release(tl, Thread::current()); // because it could be that Thread::current() != t
@@ -165,9 +167,7 @@
 
 JfrStackFrame* JfrThreadLocal::install_stackframes() const {
   assert(_stackframes == NULL, "invariant");
-  _stackdepth = (u4)JfrOptionSet::stackdepth();
-  guarantee(_stackdepth > 0, "Stackdepth must be > 0");
-  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing);
+  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
   return _stackframes;
 }
 
@@ -188,3 +188,7 @@
   assert(t != NULL, "invariant");
   t->jfr_thread_local()->_excluded = false;
 }
+
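+// Falls back to the global JfrOptionSet stack depth until a thread-specific
+// depth has been installed.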
+u4 JfrThreadLocal::stackdepth() const {
+  return _stackdepth != 0 ? _stackdepth : (u4)JfrOptionSet::stackdepth();
+}
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -114,9 +114,7 @@
     _stackframes = frames;
   }
 
-  u4 stackdepth() const {
-    return _stackdepth;
-  }
+  u4 stackdepth() const;
 
   void set_stackdepth(u4 depth) {
     _stackdepth = depth;
--- a/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -41,41 +41,23 @@
 #define REMOVE_ID(k) JfrTraceId::remove(k);
 #define RESTORE_ID(k) JfrTraceId::restore(k);
 
-class JfrTraceFlag {
- private:
-  mutable jbyte _flags;
- public:
-  JfrTraceFlag() : _flags(0) {}
-  explicit JfrTraceFlag(jbyte flags) : _flags(flags) {}
-  void set_flag(jbyte flag) const {
-    _flags |= flag;
-  }
-  void clear_flag(jbyte flag) const {
-    _flags &= (~flag);
-  }
-  jbyte flags() const { return _flags; }
-  bool is_set(jbyte flag) const {
-    return (_flags & flag) != 0;
-  }
-  jbyte* const flags_addr() const {
-    return &_flags;
-  }
-};
-
 #define DEFINE_TRACE_FLAG mutable JfrTraceFlag _trace_flags
 
 #define DEFINE_TRACE_FLAG_ACCESSOR                 \
-  void set_trace_flag(jbyte flag) const {          \
-    _trace_flags.set_flag(flag);                   \
+  bool is_trace_flag_set(jshort flag) const {      \
+    return _trace_flags.is_set(flag);              \
   }                                                \
-  jbyte trace_flags() const {                      \
+  jshort trace_flags() const {                     \
     return _trace_flags.flags();                   \
   }                                                \
-  bool is_trace_flag_set(jbyte flag) const {       \
-    return _trace_flags.is_set(flag);              \
+  void set_trace_flags(jshort flags) const {       \
+    _trace_flags.set_flags(flags);                 \
   }                                                \
-  jbyte* const trace_flags_addr() const {          \
+  jbyte* trace_flags_addr() const {                \
     return _trace_flags.flags_addr();              \
+  }                                                \
+  jbyte* trace_meta_addr() const {                 \
+    return _trace_flags.meta_addr();               \
   }
 
 #endif // SHARE_JFR_SUPPORT_JFRTRACEIDEXTENSION_HPP
--- a/src/hotspot/share/jfr/utilities/jfrHashtable.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/utilities/jfrHashtable.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -134,13 +134,28 @@
   typedef IdType ID;
   void init() { _id = 0; }
   ID id() const { return _id; }
-  void set_id(ID id) { _id = id; }
+  void set_id(ID id) const { _id = id; }
   void set_value(const T& value) { this->set_literal(value); }
   T& value() const { return *const_cast<Entry*>(this)->literal_addr();}
   const T* value_addr() const { return const_cast<Entry*>(this)->literal_addr(); }
-
  private:
-  ID _id;
+  mutable ID _id;
+};
+
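+// Hashtable entry that is additionally chained onto a singly linked list and
+// tracks per-entry serialization and unloading state; mutators are const
+// (backed by mutable fields) so entries reached via const lookups can be
+// updated in place.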
+template <typename T, typename IdType>
+class ListEntry : public Entry<T, IdType> {
+ public:
+  void init() { Entry<T, IdType>::init(); _list_next = NULL; _serialized = false; _unloading = false; }
+  const ListEntry<T, IdType>* list_next() const { return _list_next; }
+  void set_list_next(const ListEntry<T, IdType>* next) const { _list_next = next; }
+  bool is_serialized() const { return _serialized; }
+  void set_serialized() const { _serialized = true; }
+  bool is_unloading() const { return _unloading; }
+  void set_unloading() const { _unloading = true; }
+ private:
+  mutable const ListEntry<T, IdType>* _list_next;
+  mutable bool _serialized;
+  mutable bool _unloading;
 };
 
 template <typename T, typename IdType, template <typename, typename> class Entry,
@@ -190,6 +205,7 @@
   void free_entry(HashEntry* entry) {
     assert(entry != NULL, "invariant");
     JfrBasicHashtable<T>::unlink_entry(entry);
+    _callback->unlink(entry);
     FREE_C_HEAP_ARRAY(char, entry);
   }
 
--- a/src/hotspot/share/jfr/utilities/jfrIterator.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/utilities/jfrIterator.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -40,14 +40,6 @@
   }
 };
 
-template <typename Node>
-class StopOnEmptyCondition : public AllStatic {
-public:
-  static bool has_next(const Node* node) {
-    return node != NULL && !node->empty();
-  }
-};
-
 template <typename List, template <typename> class ContinuationPredicate>
 class Navigator {
  public:
@@ -91,12 +83,6 @@
   NavigatorStopOnNull(List& list, jfr_iter_direction direction = forward) : Navigator<List, StopOnNullCondition>(list, direction) {}
 };
 
-template <typename List>
-class NavigatorStopOnEmpty : public Navigator<List, StopOnEmptyCondition> {
-public:
-  NavigatorStopOnEmpty(List& list, jfr_iter_direction direction = forward) : Navigator<List, StopOnEmptyCondition>(list, direction) {}
-};
-
 template<typename List, template <typename> class Navigator, typename AP = StackObj>
 class IteratorHost : public AP {
  private:
@@ -118,10 +104,4 @@
   StopOnNullIterator(List& list, jfr_iter_direction direction = forward) : IteratorHost<List, NavigatorStopOnNull, AP>(list, direction) {}
 };
 
-template<typename List, typename AP = StackObj>
-class StopOnEmptyIterator : public IteratorHost<List, NavigatorStopOnEmpty, AP> {
-public:
-  StopOnEmptyIterator(List& list, jfr_iter_direction direction = forward) : IteratorHost<List, NavigatorStopOnEmpty, AP>(list, direction) {}
-};
-
 #endif // SHARE_JFR_UTILITIES_JFRITERATOR_HPP
--- a/src/hotspot/share/jfr/utilities/jfrTypes.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/utilities/jfrTypes.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -38,6 +38,39 @@
 const u4 MIN_STACK_DEPTH = 1;
 const u4 MAX_STACK_DEPTH = 2048;
 
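+// Three-way comparison for traceids; sort_traceid adapts it for sort
+// callbacks that receive pointers to the elements.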
+inline int compare_traceid(const traceid& lhs, const traceid& rhs) {
+  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
+}
+
+inline int sort_traceid(traceid* lhs, traceid* rhs) {
+  return compare_traceid(*lhs, *rhs);
+}
+
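+// Packs the trace flags into a jshort and exposes its two bytes separately:
+// flags_addr() addresses the first byte and meta_addr() the second.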
+class JfrTraceFlag {
+ private:
+  mutable jshort _flags;
+ public:
+  JfrTraceFlag() : _flags(0) {}
+  bool is_set(jshort flag) const {
+    return (_flags & flag) != 0;
+  }
+
+  jshort flags() const {
+    return _flags;
+  }
+
+  void set_flags(jshort flags) const {
+    _flags = flags;
+  }
+
+  jbyte* flags_addr() const {
+    return (jbyte*)&_flags;
+  }
+  jbyte* meta_addr() const {
+    return ((jbyte*)&_flags) + 1;
+  }
+};
+
 enum EventStartTime {
   UNTIMED,
   TIMED
--- a/src/hotspot/share/jfr/writers/jfrJavaEventWriter.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/writers/jfrJavaEventWriter.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -135,8 +135,7 @@
 bool JfrJavaEventWriter::initialize() {
   static bool initialized = false;
   if (!initialized) {
-    Thread* thread = Thread::current();
-    initialized = setup_event_writer_offsets(thread);
+    initialized = setup_event_writer_offsets(Thread::current());
   }
   return initialized;
 }
@@ -155,6 +154,7 @@
   // large enough to accommodate the "requested size".
   const bool is_valid = buffer->free_size() >= (size_t)(used + requested);
   u1* const new_current_position = is_valid ? buffer->pos() + used : buffer->pos();
+  assert(start_pos_offset != invalid_offset, "invariant");
   w->long_field_put(start_pos_offset, (jlong)buffer->pos());
   w->long_field_put(current_pos_offset, (jlong)new_current_position);
   // only update java writer if underlying memory changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/writers/jfrTypeWriterHost.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
+#define SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
+
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.hpp"
+
+template <typename WriterImpl, u4 ID>
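+// RAII helper that brackets serialization of a set of type entries: the
+// constructor writes the type id and reserves room for the entry count, and
+// the destructor either patches in the final count or rewinds the writer
+// context when nothing was written.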
+class JfrTypeWriterHost : public StackObj {
+ private:
+  WriterImpl _impl;
+  JfrCheckpointWriter* _writer;
+  JfrCheckpointContext _ctx;
+  int64_t _count_offset;
+  int _count;
+  bool _skip_header;
+ public:
+  JfrTypeWriterHost(JfrCheckpointWriter* writer,
+                    bool class_unload = false,
+                    bool skip_header = false) : _impl(writer, class_unload),
+                                                _writer(writer),
+                                                _ctx(writer->context()),
+                                                _count(0),
+                                                _skip_header(skip_header) {
+    assert(_writer != NULL, "invariant");
+    if (!_skip_header) {
+      _writer->write_type((JfrTypeId)ID);
+      _count_offset = _writer->reserve(sizeof(u4)); // Don't know how many yet
+    }
+  }
+
+  ~JfrTypeWriterHost() {
+    if (_count == 0) {
+      // nothing written, restore context for rewind
+      _writer->set_context(_ctx);
+      return;
+    }
+    assert(_count > 0, "invariant");
+    if (!_skip_header) {
+      _writer->write_count(_count, _count_offset);
+    }
+  }
+
+  bool operator()(typename WriterImpl::Type const & value) {
+    this->_count += _impl(value);
+    return true;
+  }
+
+  int count() const   { return _count; }
+  void add(int count) { _count += count; }
+};
+
+typedef int(*type_write_operation)(JfrCheckpointWriter*, const void*);
+
+template <typename T, type_write_operation op>
+class JfrTypeWriterImplHost {
+ private:
+  JfrCheckpointWriter* _writer;
+ public:
+  typedef T Type;
+  JfrTypeWriterImplHost(JfrCheckpointWriter* writer, bool class_unload = false) : _writer(writer) {}
+  int operator()(T const& value) {
+    return op(this->_writer, value);
+  }
+};
+
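+// Filters entries through a predicate before forwarding to the wrapped
+// writer; rejected entries contribute zero to the count.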
+template <typename T, typename Predicate, type_write_operation op>
+class JfrPredicatedTypeWriterImplHost : public JfrTypeWriterImplHost<T, op> {
+ private:
+  Predicate _predicate;
+  typedef JfrTypeWriterImplHost<T, op> Parent;
+ public:
+  JfrPredicatedTypeWriterImplHost(JfrCheckpointWriter* writer, bool class_unload = false) :
+    Parent(writer), _predicate(class_unload) {}
+  int operator()(T const& value) {
+    return _predicate(value) ? Parent::operator()(value) : 0;
+  }
+};
+
+#endif // SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
--- a/src/hotspot/share/jfr/writers/jfrWriterHost.inline.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/writers/jfrWriterHost.inline.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -114,10 +114,7 @@
 template <typename BE, typename IE, typename WriterPolicyImpl >
 template <typename T>
 inline void WriterHost<BE, IE, WriterPolicyImpl>::be_write(T value) {
-  u1* const pos = ensure_size(sizeof(T));
-  if (pos) {
-    this->set_current_pos(BE::be_write(&value, 1, pos));
-  }
+  be_write(&value, 1);
 }
 
 template <typename BE, typename IE, typename WriterPolicyImpl >
--- a/src/hotspot/share/runtime/vmOperations.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/runtime/vmOperations.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -127,6 +127,7 @@
   template(ScavengeMonitors)                      \
   template(PrintMetadata)                         \
   template(GTestExecuteAtSafepoint)               \
+  template(JFROldObject)                          \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public:
--- a/src/jdk.jfr/share/classes/jdk/jfr/consumer/ChunkParser.java	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/jdk.jfr/share/classes/jdk/jfr/consumer/ChunkParser.java	Sat Aug 24 14:30:27 2019 +0200
@@ -49,6 +49,7 @@
  */
 final class ChunkParser {
     private static final long CONSTANT_POOL_TYPE_ID = 1;
+    private static final String CHUNKHEADER = "jdk.types.ChunkHeader";
     private final RecordingInput input;
     private final ChunkHeader chunkHeader;
     private final MetadataDescriptor metadata;
@@ -256,13 +257,16 @@
                 ConstantLookup lookup = constantLookups.get(id);
                 Type type = typeMap.get(id);
                 if (lookup == null) {
-                    Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Found constant pool(" + id + ") that is never used");
                     if (type == null) {
                         throw new IOException(
                                 "Error parsing constant pool type " + getName(id) + " at position " + input.position() + " at check point between [" + lastCP + ", " + lastCP + size + "]");
                     }
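+                    // Suppress the "never used" log for the chunk header constant pool.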
+                    if (!CHUNKHEADER.equals(type.getName())) {
+                        Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Found constant pool(" + id + ") that is never used");
+                    }
                     ConstantMap pool = new ConstantMap(ObjectFactory.create(type, timeConverter), type.getName());
-                    constantLookups.put(type.getId(), new ConstantLookup(pool, type));
+                    lookup = new ConstantLookup(pool, type);
+                    constantLookups.put(type.getId(), lookup);
                 }
                 Parser parser = parsers.get(id);
                 if (parser == null) {
@@ -278,8 +282,8 @@
                     }
                     for (int j = 0; j < count; j++) {
                         long key = input.readLong();
-                      Object resolved = lookup.getPreviousResolved(key);
-                      if (resolved == null) {
+                        Object resolved = lookup.getPreviousResolved(key);
+                        if (resolved == null) {
                             Object v = parser.parse(input);
                             logConstant(key, v, false);
                             lookup.getLatestPool().put(key, v);