8225797: OldObjectSample event creates unexpected amount of checkpoint data
author mgronlun
Sat, 14 Sep 2019 14:40:09 +0200
changeset 58132 caa25ab47aca
parent 58131 3054503bad7d
child 58133 515fc9f6b2d6
8225797: OldObjectSample event creates unexpected amount of checkpoint data Reviewed-by: egahlin
src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp
src/hotspot/share/jfr/jfr.cpp
src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp
src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp
src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp
src/hotspot/share/jfr/leakprofiler/sampling/sampleList.hpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.hpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp
src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp
src/hotspot/share/jfr/recorder/repository/jfrChunkState.hpp
src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp
src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp
src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp
src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.hpp
src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp
src/hotspot/share/jfr/support/jfrKlassExtension.hpp
src/hotspot/share/jfr/support/jfrThreadLocal.cpp
src/hotspot/share/jfr/support/jfrThreadLocal.hpp
src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp
src/hotspot/share/jfr/utilities/jfrBlob.cpp
src/hotspot/share/jfr/utilities/jfrBlob.hpp
src/hotspot/share/jfr/utilities/jfrHashtable.hpp
src/hotspot/share/jfr/utilities/jfrTypes.hpp
src/hotspot/share/jfr/writers/jfrTypeWriterHost.hpp
src/hotspot/share/jfr/writers/jfrWriterHost.inline.hpp
--- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -44,7 +44,6 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/array.hpp"
-#include "oops/constantPool.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/method.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
@@ -1522,7 +1521,7 @@
     assert(new_method != NULL, "invariant");
     assert(new_method->name() == old_method->name(), "invariant");
     assert(new_method->signature() == old_method->signature(), "invariant");
-    *new_method->trace_flags_addr() = old_method->trace_flags();
+    new_method->set_trace_flags(old_method->trace_flags());
     assert(new_method->trace_flags() == old_method->trace_flags(), "invariant");
   }
 }
--- a/src/hotspot/share/jfr/jfr.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/jfr.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,9 @@
 }
 
 void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
-  LeakProfiler::oops_do(is_alive, f);
+  if (LeakProfiler::is_running()) {
+    LeakProfiler::oops_do(is_alive, f);
+  }
 }
 
 bool Jfr::on_flight_recorder_option(const JavaVMOption** option, char* delimiter) {
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -55,18 +55,23 @@
   return !_edges->has_entries();
 }
 
-void EdgeStore::assign_id(EdgeEntry* entry) {
+void EdgeStore::on_link(EdgeEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
   entry->set_id(++_edge_id_counter);
 }
 
-bool EdgeStore::equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry) {
+bool EdgeStore::on_equals(uintptr_t hash, const EdgeEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->hash() == hash, "invariant");
   return true;
 }
 
+void EdgeStore::on_unlink(EdgeEntry* entry) {
+  assert(entry != NULL, "invariant");
+  // nothing
+}
+
 #ifdef ASSERT
 bool EdgeStore::contains(const oop* reference) const {
   return get(reference) != NULL;
@@ -75,22 +80,21 @@
 
 StoredEdge* EdgeStore::get(const oop* reference) const {
   assert(reference != NULL, "invariant");
-  const StoredEdge e(NULL, reference);
-  EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
+  EdgeEntry* const entry = _edges->lookup_only((uintptr_t)reference);
   return entry != NULL ? entry->literal_addr() : NULL;
 }
 
 StoredEdge* EdgeStore::put(const oop* reference) {
   assert(reference != NULL, "invariant");
   const StoredEdge e(NULL, reference);
-  assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
-  EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
+  assert(NULL == _edges->lookup_only((uintptr_t)reference), "invariant");
+  EdgeEntry& entry = _edges->put((uintptr_t)reference, e);
   return entry.literal_addr();
 }
 
 traceid EdgeStore::get_id(const Edge* edge) const {
   assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  EdgeEntry* const entry = _edges->lookup_only((uintptr_t)edge->reference());
   assert(entry != NULL, "invariant");
   return entry->id();
 }
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -58,7 +58,7 @@
 };
 
 class EdgeStore : public CHeapObj<mtTracing> {
-  typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
+  typedef HashTableHost<StoredEdge, traceid, JfrHashtableEntry, EdgeStore> EdgeHashTable;
   typedef EdgeHashTable::HashEntry EdgeEntry;
   template <typename,
             typename,
@@ -74,8 +74,9 @@
   EdgeHashTable* _edges;
 
   // Hash table callbacks
-  void assign_id(EdgeEntry* entry);
-  bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
+  void on_link(EdgeEntry* entry);
+  bool on_equals(uintptr_t hash, const EdgeEntry* entry);
+  void on_unlink(EdgeEntry* entry);
 
   StoredEdge* get(const oop* reference) const;
   StoredEdge* put(const oop* reference);
--- a/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -43,7 +43,6 @@
 #include "jfr/leakprofiler/utilities/granularTimer.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
-#include "oops/markWord.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -101,7 +100,7 @@
   // Save the original markWord for the potential leak objects,
   // to be restored on function exit
   ObjectSampleMarker marker;
-  if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
+  if (ObjectSampleCheckpoint::save_mark_words(_sampler, marker, _emit_all) == 0) {
     // no valid samples to process
     return;
   }
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -24,10 +24,6 @@
 
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
-#include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
-#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
 #include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
@@ -35,14 +31,101 @@
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSample.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/metadata/jfrSerializer.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.inline.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/utilities/jfrHashtable.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/growableArray.hpp"
+
+static bool predicate(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  bool found = false;
+  set->find_sorted<traceid, compare_traceid>(id, found);
+  return found;
+}
+
+static bool mutable_predicate(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  bool found = false;
+  const int location = set->find_sorted<traceid, compare_traceid>(id, found);
+  if (!found) {
+    set->insert_before(location, id);
+  }
+  return found;
+}
+
+static bool add(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  return mutable_predicate(set, id);
+}
+
+const int initial_array_size = 64;
+
+template <typename T>
+static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
+  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
+}
+
+static GrowableArray<traceid>* unloaded_thread_id_set = NULL;
 
-template <typename SampleProcessor>
-static void do_samples(ObjectSample* sample, const ObjectSample* const end, SampleProcessor& processor) {
+class ThreadIdExclusiveAccess : public StackObj {
+ private:
+  static Semaphore _mutex_semaphore;
+ public:
+  ThreadIdExclusiveAccess() { _mutex_semaphore.wait(); }
+  ~ThreadIdExclusiveAccess() { _mutex_semaphore.signal(); }
+};
+
+Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);
+
+static bool has_thread_exited(traceid tid) {
+  assert(tid != 0, "invariant");
+  return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
+}
+
+static void add_to_unloaded_thread_set(traceid tid) {
+  ThreadIdExclusiveAccess lock;
+  if (unloaded_thread_id_set == NULL) {
+    unloaded_thread_id_set = c_heap_allocate_array<traceid>();
+  }
+  add(unloaded_thread_id_set, tid);
+}
+
+void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  if (LeakProfiler::is_running()) {
+    add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id());
+  }
+}
+
+// Track the set of unloaded klasses during a chunk / epoch.
+// Methods in stacktraces belonging to unloaded klasses must not be accessed.
+static GrowableArray<traceid>* unloaded_klass_set = NULL;
+
+static void add_to_unloaded_klass_set(traceid klass_id) {
+  if (unloaded_klass_set == NULL) {
+    unloaded_klass_set = c_heap_allocate_array<traceid>();
+  }
+  unloaded_klass_set->append(klass_id);
+}
+
+static void sort_unloaded_klass_set() {
+  if (unloaded_klass_set != NULL && unloaded_klass_set->length() > 1) {
+    unloaded_klass_set->sort(sort_traceid);
+  }
+}
+
+void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
+  assert(k != NULL, "invariant");
+  add_to_unloaded_klass_set(TRACE_ID(k));
+}
+
+template <typename Processor>
+static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
   assert(sample != NULL, "invariant");
   while (sample != end) {
     processor.sample_do(sample);
@@ -50,244 +133,339 @@
   }
 }
 
-class RootSystemType : public JfrSerializer {
- public:
-  void serialize(JfrCheckpointWriter& writer) {
-    const u4 nof_root_systems = OldObjectRoot::_number_of_systems;
-    writer.write_count(nof_root_systems);
-    for (u4 i = 0; i < nof_root_systems; ++i) {
-      writer.write_key(i);
-      writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i));
-    }
-  }
-};
+template <typename Processor>
+static void iterate_samples(Processor& processor, bool all = false) {
+  ObjectSampler* const sampler = ObjectSampler::sampler();
+  assert(sampler != NULL, "invariant");
+  ObjectSample* const last = sampler->last();
+  assert(last != NULL, "invariant");
+  do_samples(last, all ? NULL : sampler->last_resolved(), processor);
+}
 
-class RootType : public JfrSerializer {
+class SampleMarker {
+ private:
+  ObjectSampleMarker& _marker;
+  jlong _last_sweep;
+  int _count;
  public:
-  void serialize(JfrCheckpointWriter& writer) {
-    const u4 nof_root_types = OldObjectRoot::_number_of_types;
-    writer.write_count(nof_root_types);
-    for (u4 i = 0; i < nof_root_types; ++i) {
-      writer.write_key(i);
-      writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i));
-    }
-  }
-};
-
-class CheckpointInstall {
- private:
-  const JfrCheckpointBlobHandle& _cp;
- public:
-  CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {}
+  SampleMarker(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), _last_sweep(last_sweep), _count(0) {}
   void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (!sample->is_dead()) {
-      sample->set_klass_checkpoint(_cp);
+    if (sample->is_alive_and_older_than(_last_sweep)) {
+      _marker.mark(sample->object());
+      ++_count;
     }
   }
-};
-
-class CheckpointWrite {
- private:
-  JfrCheckpointWriter& _writer;
-  const jlong _last_sweep;
- public:
-  CheckpointWrite(JfrCheckpointWriter& writer, jlong last_sweep) : _writer(writer), _last_sweep(last_sweep) {}
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (sample->is_alive_and_older_than(_last_sweep)) {
-      if (sample->has_thread_checkpoint()) {
-        const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
-        thread_cp->exclusive_write(_writer);
-      }
-      if (sample->has_klass_checkpoint()) {
-        const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
-        klass_cp->exclusive_write(_writer);
-      }
-    }
-  }
-};
-
-class CheckpointStateReset {
- private:
-  const jlong _last_sweep;
- public:
-  CheckpointStateReset(jlong last_sweep) : _last_sweep(last_sweep) {}
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (sample->is_alive_and_older_than(_last_sweep)) {
-      if (sample->has_thread_checkpoint()) {
-        const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
-        thread_cp->reset_write_state();
-      }
-      if (sample->has_klass_checkpoint()) {
-        const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
-        klass_cp->reset_write_state();
-      }
-    }
-  }
-};
-
-class StackTraceWrite {
- private:
-  JfrStackTraceRepository& _stack_trace_repo;
-  JfrCheckpointWriter& _writer;
-  int _count;
- public:
-  StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer) :
-    _stack_trace_repo(stack_trace_repo), _writer(writer), _count(0) {
-    JfrStacktrace_lock->lock_without_safepoint_check();
-  }
-  ~StackTraceWrite() {
-    assert(JfrStacktrace_lock->owned_by_self(), "invariant");
-    JfrStacktrace_lock->unlock();
-  }
-
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (!sample->is_dead()) {
-      if (sample->has_stack_trace()) {
-        JfrTraceId::use(sample->klass(), true);
-        _stack_trace_repo.write(_writer, sample->stack_trace_id(), sample->stack_trace_hash());
-        ++_count;
-      }
-    }
-  }
-
   int count() const {
     return _count;
   }
 };
 
-class SampleMark {
+int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) {
+  assert(sampler != NULL, "invariant");
+  if (sampler->last() == NULL) {
+    return 0;
+  }
+  SampleMarker sample_marker(marker, emit_all ? max_jlong : sampler->last_sweep().value());
+  iterate_samples(sample_marker, true);
+  return sample_marker.count();
+}
+
+class BlobCache {
+  typedef HashTableHost<JfrBlobHandle, traceid, JfrHashtableEntry, BlobCache> BlobTable;
+  typedef BlobTable::HashEntry BlobEntry;
  private:
-  ObjectSampleMarker& _marker;
-  jlong _last_sweep;
-  int _count;
+  BlobTable _table;
+  traceid _lookup_id;
  public:
-  SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker),
-                                                             _last_sweep(last_sweep),
-                                                             _count(0) {}
+  BlobCache(size_t size) : _table(this, size), _lookup_id(0) {}
+  JfrBlobHandle get(const ObjectSample* sample);
+  void put(const ObjectSample* sample, const JfrBlobHandle& blob);
+  // Hash table callbacks
+  void on_link(const BlobEntry* entry) const;
+  bool on_equals(uintptr_t hash, const BlobEntry* entry) const;
+  void on_unlink(BlobEntry* entry) const;
+};
+
+JfrBlobHandle BlobCache::get(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  _lookup_id = sample->stack_trace_id();
+  assert(_lookup_id != 0, "invariant");
+  BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash());
+  return entry != NULL ? entry->literal() : JfrBlobHandle();
+}
+
+void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) {
+  assert(sample != NULL, "invariant");
+  assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant");
+  _lookup_id = sample->stack_trace_id();
+  assert(_lookup_id != 0, "invariant");
+  _table.put(sample->stack_trace_hash(), blob);
+}
+
+inline void BlobCache::on_link(const BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
+  assert(entry->id() == 0, "invariant");
+  entry->set_id(_lookup_id);
+}
+
+inline bool BlobCache::on_equals(uintptr_t hash, const BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
+  assert(entry->hash() == hash, "invariant");
+  return entry->id() == _lookup_id;
+}
+
+inline void BlobCache::on_unlink(BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
+}
+
+static GrowableArray<traceid>* id_set = NULL;
+
+static void prepare_for_resolution() {
+  id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
+  sort_unloaded_klass_set();
+}
+
+static bool stack_trace_precondition(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  return sample->has_stack_trace_id() && !sample->is_dead();
+}
+
+class StackTraceBlobInstaller {
+ private:
+  const JfrStackTraceRepository& _stack_trace_repo;
+  BlobCache _cache;
+  const JfrStackTrace* resolve(const ObjectSample* sample);
+  void install(ObjectSample* sample);
+ public:
+  StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo);
   void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (sample->is_alive_and_older_than(_last_sweep)) {
-      _marker.mark(sample->object());
-      ++_count;
+    if (stack_trace_precondition(sample)) {
+      install(sample);
     }
   }
-
-  int count() const {
-    return _count;
-  }
 };
 
-void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool type_set) {
-  if (!writer.has_data()) {
+StackTraceBlobInstaller::StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo) :
+  _stack_trace_repo(stack_trace_repo), _cache(JfrOptionSet::old_object_queue_size()) {
+  prepare_for_resolution();
+}
+
+const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) {
+  return _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id());
+}
+
+#ifdef ASSERT
+static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) {
+  assert(!sample->has_stacktrace(), "invariant");
+  assert(stack_trace != NULL, "invariant");
+  assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant");
+  assert(stack_trace->id() == sample->stack_trace_id(), "invariant");
+}
+#endif
+
+void StackTraceBlobInstaller::install(ObjectSample* sample) {
+  JfrBlobHandle blob = _cache.get(sample);
+  if (blob.valid()) {
+    sample->set_stacktrace(blob);
     return;
   }
-
-  assert(writer.has_data(), "invariant");
-  const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
-  CheckpointInstall install(h_cp);
-
-  // Class unload implies a safepoint.
-  // Not class unload implies the object sampler is locked, because it was claimed exclusively earlier.
-  // Therefore: direct access the object sampler instance is safe.
-  ObjectSampler* const object_sampler = ObjectSampler::sampler();
-  assert(object_sampler != NULL, "invariant");
+  const JfrStackTrace* const stack_trace = resolve(sample);
+  DEBUG_ONLY(validate_stack_trace(sample, stack_trace));
+  JfrCheckpointWriter writer(false, true, Thread::current());
+  writer.write_type(TYPE_STACKTRACE);
+  writer.write_count(1);
+  ObjectSampleCheckpoint::write_stacktrace(stack_trace, writer);
+  blob = writer.move();
+  _cache.put(sample, blob);
+  sample->set_stacktrace(blob);
+}
 
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  const ObjectSample* const last_resolved = object_sampler->last_resolved();
-
-  // install only to new samples since last resolved checkpoint
-  if (last != last_resolved) {
-    do_samples(last, last_resolved, install);
-    if (class_unload) {
-      return;
-    }
-    if (type_set) {
-      object_sampler->set_last_resolved(last);
-    }
+static void install_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
+  assert(sampler != NULL, "invariant");
+  const ObjectSample* const last = sampler->last();
+  if (last != sampler->last_resolved()) {
+    StackTraceBlobInstaller installer(stack_trace_repo);
+    iterate_samples(installer);
   }
 }
 
-void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
+// caller needs ResourceMark
+void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
+  assert(sampler != NULL, "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  install_stack_traces(sampler, stack_trace_repo);
+}
+
+static traceid get_klass_id(traceid method_id) {
+  assert(method_id != 0, "invariant");
+  return method_id >> TRACE_ID_SHIFT;
+}
+
+static bool is_klass_unloaded(traceid method_id) {
+  return unloaded_klass_set != NULL && predicate(unloaded_klass_set, get_klass_id(method_id));
+}
+
+static bool is_processed(traceid id) {
+  assert(id != 0, "invariant");
+  assert(id_set != NULL, "invariant");
+  return mutable_predicate(id_set, id);
+}
+
+void ObjectSampleCheckpoint::add_to_leakp_set(const Method* method, traceid method_id) {
+  if (is_processed(method_id) || is_klass_unloaded(method_id)) {
+    return;
+  }
+  JfrTraceId::set_leakp(method);
+}
+
+void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
+  assert(trace != NULL, "invariant");
+  // JfrStackTrace
+  writer.write(trace->id());
+  writer.write((u1)!trace->_reached_root);
+  writer.write(trace->_nr_of_frames);
+  // JfrStackFrames
+  for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
+    const JfrStackFrame& frame = trace->_frames[i];
+    frame.write(writer);
+    add_to_leakp_set(frame._method, frame._methodid);
+  }
+}
+
+static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) {
+  if (reset) {
+    blob->reset_write_state();
+    return;
+  }
+  blob->exclusive_write(writer);
+}
+
+static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  if (sample->has_type_set()) {
+    write_blob(sample->type_set(), writer, reset);
+  }
+}
+
+static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  assert(sample->has_thread(), "invariant");
+  if (has_thread_exited(sample->thread_id())) {
+    write_blob(sample->thread(), writer, reset);
+  }
+}
+
+static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  if (sample->has_stacktrace()) {
+    write_blob(sample->stacktrace(), writer, reset);
+  }
+}
+
+static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  assert(sample != NULL, "invariant");
+  write_stacktrace_blob(sample, writer, reset);
+  write_thread_blob(sample, writer, reset);
+  write_type_set_blob(sample, writer, reset);
+}
+
+class BlobWriter {
+ private:
+  const ObjectSampler* _sampler;
+  JfrCheckpointWriter& _writer;
+  const jlong _last_sweep;
+  bool _reset;
+ public:
+  BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
+    _sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false)  {}
+  void sample_do(ObjectSample* sample) {
+    if (sample->is_alive_and_older_than(_last_sweep)) {
+      write_blobs(sample, _writer, _reset);
+    }
+  }
+  void set_reset() {
+    _reset = true;
+  }
+};
+
+static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
+  // sample set is predicated on time of last sweep
+  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
+  JfrCheckpointWriter writer(false, false, thread);
+  BlobWriter cbw(sampler, writer, last_sweep);
+  iterate_samples(cbw, true);
+  // reset blob write states
+  cbw.set_reset();
+  iterate_samples(cbw, true);
+}
+
+void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
   assert(sampler != NULL, "invariant");
   assert(edge_store != NULL, "invariant");
   assert(thread != NULL, "invariant");
-
-  static bool types_registered = false;
-  if (!types_registered) {
-    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
-    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
-    types_registered = true;
-  }
-
-  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
-  ObjectSample* const last = const_cast<ObjectSample*>(sampler->last());
-  {
-    JfrCheckpointWriter writer(false, false, thread);
-    CheckpointWrite checkpoint_write(writer, last_sweep);
-    do_samples(last, NULL, checkpoint_write);
-  }
-
-  CheckpointStateReset state_reset(last_sweep);
-  do_samples(last, NULL, state_reset);
-
+  write_sample_blobs(sampler, emit_all, thread);
+  // write reference chains
   if (!edge_store->is_empty()) {
-    // java object and chain representations
     JfrCheckpointWriter writer(false, true, thread);
     ObjectSampleWriter osw(writer, edge_store);
     edge_store->iterate(osw);
   }
 }
 
-int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) {
-  assert(object_sampler != NULL, "invariant");
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  if (last == NULL) {
-    return 0;
+static void clear_unloaded_klass_set() {
+  if (unloaded_klass_set != NULL && unloaded_klass_set->is_nonempty()) {
+    unloaded_klass_set->clear();
   }
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  SampleMark mark(marker, last_sweep);
-  do_samples(last, NULL, mark);
-  return mark.count();
+}
+
+// A linked list of saved type set blobs for the epoch.
+// The link consist of a reference counted handle.
+static JfrBlobHandle saved_type_set_blobs;
+
+static void release_state_for_previous_epoch() {
+  // decrements the reference count and the list is reinitialized
+  saved_type_set_blobs = JfrBlobHandle();
+  clear_unloaded_klass_set();
 }
 
-WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) :
-  _sampler(sampler), _stack_trace_repo(repo) {}
-
-bool WriteObjectSampleStacktrace::process() {
-  assert(LeakProfiler::is_running(), "invariant");
-  assert(_sampler != NULL, "invariant");
+class BlobInstaller {
+ public:
+  ~BlobInstaller() {
+    release_state_for_previous_epoch();
+  }
+  void sample_do(ObjectSample* sample) {
+    if (!sample->is_dead()) {
+      sample->set_type_set(saved_type_set_blobs);
+    }
+  }
+};
 
-  ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last());
-  const ObjectSample* const last_resolved = _sampler->last_resolved();
-  if (last == last_resolved) {
-    return true;
-  }
-
-  JfrCheckpointWriter writer(false, true, Thread::current());
-  const JfrCheckpointContext ctx = writer.context();
-
-  writer.write_type(TYPE_STACKTRACE);
-  const jlong count_offset = writer.reserve(sizeof(u4));
+static void install_type_set_blobs() {
+  BlobInstaller installer;
+  iterate_samples(installer);
+}
 
-  int count = 0;
-  {
-    StackTraceWrite stack_trace_write(_stack_trace_repo, writer); // JfrStacktrace_lock
-    do_samples(last, last_resolved, stack_trace_write);
-    count = stack_trace_write.count();
+static void save_type_set_blob(JfrCheckpointWriter& writer, bool copy = false) {
+  assert(writer.has_data(), "invariant");
+  const JfrBlobHandle blob = copy ? writer.copy() : writer.move();
+  if (saved_type_set_blobs.valid()) {
+    saved_type_set_blobs->set_next(blob);
+  } else {
+    saved_type_set_blobs = blob;
   }
-  if (count == 0) {
-    writer.set_context(ctx);
-    return true;
+}
+
+void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
+  assert(LeakProfiler::is_running(), "invariant");
+  const ObjectSample* last = ObjectSampler::sampler()->last();
+  if (writer.has_data() && last != NULL) {
+    save_type_set_blob(writer);
+    install_type_set_blobs();
+    ObjectSampler::sampler()->set_last_resolved(last);
   }
-  assert(count > 0, "invariant");
-  writer.write_count((u4)count, count_offset);
-  JfrStackTraceRepository::write_metadata(writer);
+}
 
-  // install the stacktrace checkpoint information to the candidates
-  ObjectSampleCheckpoint::install(writer, false, false);
-  return true;
+void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
+    save_type_set_blob(writer, true);
+  }
 }
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -26,27 +26,35 @@
 #define SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
 
 #include "memory/allocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
 
 class EdgeStore;
+class JavaThread;
 class JfrCheckpointWriter;
+class JfrStackTrace;
 class JfrStackTraceRepository;
+class Klass;
+class Method;
+class ObjectSample;
 class ObjectSampleMarker;
 class ObjectSampler;
+class Thread;
 
 class ObjectSampleCheckpoint : AllStatic {
+  friend class EventEmitter;
+  friend class PathToGcRootsOperation;
+  friend class StackTraceBlobInstaller;
+ private:
+  static void add_to_leakp_set(const Method* method, traceid method_id);
+  static int save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
+  static void write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer);
+  static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
  public:
-  static void install(JfrCheckpointWriter& writer, bool class_unload, bool type_set);
-  static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
-  static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
-};
-
-class WriteObjectSampleStacktrace : public StackObj {
- private:
-  ObjectSampler* const _sampler;
-  JfrStackTraceRepository& _stack_trace_repo;
- public:
-  WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo);
-  bool process();
+  static void on_klass_unload(const Klass* k);
+  static void on_type_set(JfrCheckpointWriter& writer);
+  static void on_type_set_unload(JfrCheckpointWriter& writer);
+  static void on_thread_exit(JavaThread* jt);
+  static void on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& repo);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -33,8 +33,8 @@
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
+#include "jfr/metadata/jfrSerializer.hpp"
+#include "jfr/writers/jfrTypeWriterHost.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "utilities/growableArray.hpp"
@@ -137,30 +137,33 @@
             typename,
             size_t>
   friend class HashTableHost;
-  typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, Entry, FieldTable, 109> FieldInfoTable;
+  typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, JfrHashtableEntry, FieldTable, 109> FieldInfoTable;
  public:
   typedef FieldInfoTable::HashEntry FieldInfoEntry;
 
  private:
   static traceid _field_id_counter;
   FieldInfoTable* _table;
+  const ObjectSampleFieldInfo* _lookup;
 
-  void assign_id(FieldInfoEntry* entry) {
+  void on_link(FieldInfoEntry* entry) {
     assert(entry != NULL, "invariant");
     entry->set_id(++_field_id_counter);
   }
 
-  bool equals(const ObjectSampleFieldInfo* query, uintptr_t hash, const FieldInfoEntry* entry) {
+  bool on_equals(uintptr_t hash, const FieldInfoEntry* entry) {
     assert(hash == entry->hash(), "invariant");
-    assert(query != NULL, "invariant");
-    const ObjectSampleFieldInfo* stored = entry->literal();
-    assert(stored != NULL, "invariant");
-    assert(stored->_field_name_symbol->identity_hash() == query->_field_name_symbol->identity_hash(), "invariant");
-    return stored->_field_modifiers == query->_field_modifiers;
+    assert(_lookup != NULL, "invariant");
+    return entry->literal()->_field_modifiers == _lookup->_field_modifiers;
+  }
+
+  void on_unlink(FieldInfoEntry* entry) {
+    assert(entry != NULL, "invariant");
+    // nothing
   }
 
  public:
-  FieldTable() : _table(new FieldInfoTable(this)) {}
+  FieldTable() : _table(new FieldInfoTable(this)), _lookup(NULL) {}
   ~FieldTable() {
     assert(_table != NULL, "invariant");
     delete _table;
@@ -168,8 +171,8 @@
 
   traceid store(const ObjectSampleFieldInfo* field_info) {
     assert(field_info != NULL, "invariant");
-    const FieldInfoEntry& entry =_table->lookup_put(field_info,
-                                                    field_info->_field_name_symbol->identity_hash());
+    _lookup = field_info;
+    const FieldInfoEntry& entry = _table->lookup_put(field_info->_field_name_symbol->identity_hash(), field_info);
     return entry.id();
   }
 
@@ -196,7 +199,7 @@
 static FieldTable* field_infos = NULL;
 static RootDescriptionInfo* root_infos = NULL;
 
-int __write_sample_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* si) {
+int __write_sample_info__(JfrCheckpointWriter* writer, const void* si) {
   assert(writer != NULL, "invariant");
   assert(si != NULL, "invariant");
   const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si;
@@ -211,17 +214,17 @@
   return 1;
 }
 
-typedef JfrArtifactWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
-typedef JfrArtifactWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
+typedef JfrTypeWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
+typedef JfrTypeWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
 
 static void write_sample_infos(JfrCheckpointWriter& writer) {
   if (sample_infos != NULL) {
-    SampleWriter sw(&writer, NULL, false);
+    SampleWriter sw(&writer);
     sample_infos->iterate(sw);
   }
 }
 
-int __write_reference_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ri) {
+int __write_reference_info__(JfrCheckpointWriter* writer, const void* ri) {
   assert(writer != NULL, "invariant");
   assert(ri != NULL, "invariant");
   const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri;
@@ -233,17 +236,17 @@
   return 1;
 }
 
-typedef JfrArtifactWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
-typedef JfrArtifactWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
+typedef JfrTypeWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
+typedef JfrTypeWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
 
 static void write_reference_infos(JfrCheckpointWriter& writer) {
   if (ref_infos != NULL) {
-    ReferenceWriter rw(&writer, NULL, false);
+    ReferenceWriter rw(&writer);
     ref_infos->iterate(rw);
   }
 }
 
-int __write_array_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ai) {
+int __write_array_info__(JfrCheckpointWriter* writer, const void* ai) {
   assert(writer != NULL, "invariant");
   assert(ai != NULL, "invariant");
   const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai;
@@ -270,17 +273,17 @@
   return array_infos->store(osai);
 }
 
-typedef JfrArtifactWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
-typedef JfrArtifactWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
+typedef JfrTypeWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
+typedef JfrTypeWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
 
 static void write_array_infos(JfrCheckpointWriter& writer) {
   if (array_infos != NULL) {
-    ArrayWriter aw(&writer, NULL, false);
+    ArrayWriter aw(&writer);
     array_infos->iterate(aw);
   }
 }
 
-int __write_field_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* fi) {
+int __write_field_info__(JfrCheckpointWriter* writer, const void* fi) {
   assert(writer != NULL, "invariant");
   assert(fi != NULL, "invariant");
   const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi;
@@ -314,12 +317,12 @@
   return field_infos->store(osfi);
 }
 
-typedef JfrArtifactWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
-typedef JfrArtifactWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
+typedef JfrTypeWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
+typedef JfrTypeWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
 
 static void write_field_infos(JfrCheckpointWriter& writer) {
   if (field_infos != NULL) {
-    FieldWriter fw(&writer, NULL, false);
+    FieldWriter fw(&writer);
     field_infos->iterate(fw);
   }
 }
@@ -339,7 +342,7 @@
   return description.description();
 }
 
-int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* di) {
+int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di) {
   assert(writer != NULL, "invariant");
   assert(di != NULL, "invariant");
   const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di;
@@ -366,8 +369,8 @@
   return root_infos->store(oodi);
 }
 
-typedef JfrArtifactWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
-typedef JfrArtifactWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
+typedef JfrTypeWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
+typedef JfrTypeWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
 
 
 int _edge_reference_compare_(uintptr_t lhs, uintptr_t rhs) {
@@ -513,7 +516,7 @@
     RootResolutionSet rrs(root_infos);
     RootResolver::resolve(rrs);
     // write roots
-    RootDescriptionWriter rw(&writer, NULL, false);
+    RootDescriptionWriter rw(&writer);
     root_infos->iterate(rw);
   }
 }
@@ -576,11 +579,45 @@
   }
 }
 
+class RootSystemType : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer) {
+    const u4 nof_root_systems = OldObjectRoot::_number_of_systems;
+    writer.write_count(nof_root_systems);
+    for (u4 i = 0; i < nof_root_systems; ++i) {
+      writer.write_key(i);
+      writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i));
+    }
+  }
+};
+
+class RootType : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer) {
+    const u4 nof_root_types = OldObjectRoot::_number_of_types;
+    writer.write_count(nof_root_types);
+    for (u4 i = 0; i < nof_root_types; ++i) {
+      writer.write_key(i);
+      writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i));
+    }
+  }
+};
+
+static void register_serializers() {
+  static bool is_registered = false;
+  if (!is_registered) {
+    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
+    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
+    is_registered = true;
+  }
+}
+
 ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
   _writer(writer),
   _store(store) {
   assert(store != NULL, "invariant");
   assert(!store->is_empty(), "invariant");
+  register_serializers();
   sample_infos = NULL;
   ref_infos = NULL;
   array_infos = NULL;
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -32,7 +32,6 @@
 #include "memory/iterator.hpp"
 #include "memory/universe.hpp"
 #include "oops/klass.hpp"
-#include "oops/markWord.hpp"
 #include "oops/oop.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/frame.inline.hpp"
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -25,13 +25,14 @@
 #ifndef SHARE_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
 #define SHARE_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
 
-#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
 #include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
 #include "utilities/ticks.hpp"
+
 /*
  * Handle for diagnosing Java memory leaks.
  *
@@ -44,8 +45,9 @@
  private:
   ObjectSample* _next;
   ObjectSample* _previous;
-  JfrCheckpointBlobHandle _thread_cp;
-  JfrCheckpointBlobHandle _klass_cp;
+  JfrBlobHandle _stacktrace;
+  JfrBlobHandle _thread;
+  JfrBlobHandle _type_set;
   oop _object;
   Ticks _allocation_time;
   traceid _stack_trace_id;
@@ -62,17 +64,14 @@
   }
 
   void release_references() {
-    if (_thread_cp.valid()) {
-      _thread_cp.~JfrCheckpointBlobHandle();
-    }
-    if (_klass_cp.valid()) {
-      _klass_cp.~JfrCheckpointBlobHandle();
-    }
+    _stacktrace.~JfrBlobHandle();
+    _thread.~JfrBlobHandle();
+    _type_set.~JfrBlobHandle();
   }
 
   void reset() {
     set_stack_trace_id(0);
-    set_stack_trace_hash(0),
+    set_stack_trace_hash(0);
     release_references();
     _dead = false;
   }
@@ -80,8 +79,9 @@
  public:
   ObjectSample() : _next(NULL),
                    _previous(NULL),
-                   _thread_cp(),
-                   _klass_cp(),
+                   _stacktrace(),
+                   _thread(),
+                   _type_set(),
                    _object(NULL),
                    _allocation_time(),
                    _stack_trace_id(0),
@@ -174,7 +174,7 @@
     return _heap_used_at_last_gc;
   }
 
-  bool has_stack_trace() const {
+  bool has_stack_trace_id() const {
     return stack_trace_id() != 0;
   }
 
@@ -194,10 +194,6 @@
     _stack_trace_hash = hash;
   }
 
-  bool has_thread() const {
-    return _thread_id != 0;
-  }
-
   traceid thread_id() const {
     return _thread_id;
   }
@@ -211,37 +207,51 @@
       _allocation_time.ft_value() : _allocation_time.value()) < time_stamp;
   }
 
-  const JfrCheckpointBlobHandle& thread_checkpoint() const {
-    return _thread_cp;
+  const JfrBlobHandle& stacktrace() const {
+    return _stacktrace;
   }
 
-  bool has_thread_checkpoint() const {
-    return _thread_cp.valid();
+  bool has_stacktrace() const {
+    return _stacktrace.valid();
   }
 
-  // JfrCheckpointBlobHandle assignment operator
+  // JfrBlobHandle assignment operator
   // maintains proper reference counting
-  void set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) {
-    if (_thread_cp != ref) {
-      _thread_cp = ref;
+  void set_stacktrace(const JfrBlobHandle& ref) {
+    if (_stacktrace != ref) {
+      _stacktrace = ref;
     }
   }
 
-  const JfrCheckpointBlobHandle& klass_checkpoint() const {
-    return _klass_cp;
+  const JfrBlobHandle& thread() const {
+    return _thread;
   }
 
-  bool has_klass_checkpoint() const {
-    return _klass_cp.valid();
+  bool has_thread() const {
+    return _thread.valid();
+  }
+
+  void set_thread(const JfrBlobHandle& ref) {
+    if (_thread != ref) {
+      _thread = ref;
+    }
   }
 
-  void set_klass_checkpoint(const JfrCheckpointBlobHandle& ref) {
-    if (_klass_cp != ref) {
-      if (_klass_cp.valid()) {
-        _klass_cp->set_next(ref);
+  const JfrBlobHandle& type_set() const {
+    return _type_set;
+  }
+
+  bool has_type_set() const {
+    return _type_set.valid();
+  }
+
+  void set_type_set(const JfrBlobHandle& ref) {
+    if (_type_set != ref) {
+      if (_type_set.valid()) {
+        _type_set->set_next(ref);
         return;
       }
-      _klass_cp = ref;
+      _type_set = ref;
     }
   }
 };
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,63 +110,42 @@
   }
   const JfrThreadLocal* const tl = thread->jfr_thread_local();
   assert(tl != NULL, "invariant");
-  if (!tl->has_thread_checkpoint()) {
-    JfrCheckpointManager::create_thread_checkpoint(thread);
+  if (!tl->has_thread_blob()) {
+    JfrCheckpointManager::create_thread_blob(thread);
   }
-  assert(tl->has_thread_checkpoint(), "invariant");
+  assert(tl->has_thread_blob(), "invariant");
   return tl->thread_id();
 }
 
-// Populates the thread local stack frames, but does not add them
-// to the stacktrace repository (...yet, see stacktrace_id() below)
-//
-void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
-  assert(stacktrace != NULL, "invariant");
+static void record_stacktrace(JavaThread* thread) {
   assert(thread != NULL, "invariant");
   if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
-    JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0);
+    JfrStackTraceRepository::record_and_cache(thread);
   }
 }
 
-// We were successful in acquiring the try lock and have been selected for adding a sample.
-// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
-//
-traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
-  assert(stacktrace != NULL, "invariant");
-  assert(stacktrace->hash() != 0, "invariant");
-  const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
-  thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
-  return stacktrace_id;
-}
-
 void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
   assert(thread != NULL, "invariant");
   assert(is_created(), "invariant");
-
   const traceid thread_id = get_thread_id(thread);
   if (thread_id == 0) {
     return;
   }
-
-  const JfrThreadLocal* const tl = thread->jfr_thread_local();
-  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
-  fill_stacktrace(&stacktrace, thread);
-
+  record_stacktrace(thread);
   // try enter critical section
   JfrTryLock tryLock(&_lock);
   if (!tryLock.has_lock()) {
     log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
     return;
   }
-
-  instance().add(obj, allocated, thread_id, &stacktrace, thread);
+  instance().add(obj, allocated, thread_id, thread);
 }
 
-void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
-  assert(stacktrace != NULL, "invariant");
+void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JavaThread* thread) {
+  assert(obj != NULL, "invariant");
   assert(thread_id != 0, "invariant");
   assert(thread != NULL, "invariant");
-  assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+  assert(thread->jfr_thread_local()->has_thread_blob(), "invariant");
 
   if (_dead_samples) {
     scavenge();
@@ -190,11 +169,13 @@
 
   assert(sample != NULL, "invariant");
   sample->set_thread_id(thread_id);
-  sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
+
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  sample->set_thread(tl->thread_blob());
 
-  const unsigned int stacktrace_hash = stacktrace->hash();
+  const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
   if (stacktrace_hash != 0) {
-    sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
+    sample->set_stack_trace_id(tl->cached_stack_trace_id());
     sample->set_stack_trace_hash(stacktrace_hash);
   }
 
@@ -253,7 +234,7 @@
   sampler._last_sweep = JfrTicks::now();
 }
 
-const ObjectSample* ObjectSampler::last() const {
+ObjectSample* ObjectSampler::last() const {
   return _list->last();
 }
 
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -31,25 +31,19 @@
 typedef u8 traceid;
 
 class BoolObjectClosure;
-class JfrStackTrace;
+class JavaThread;
 class OopClosure;
 class ObjectSample;
-class ObjectSampler;
 class SampleList;
 class SamplePriorityQueue;
-class Thread;
 
 // Class reponsible for holding samples and
 // making sure the samples are evenly distributed as
 // new entries are added and removed.
 class ObjectSampler : public CHeapObj<mtTracing> {
-  friend class EventEmitter;
-  friend class JfrRecorderService;
   friend class LeakProfiler;
   friend class StartOperation;
   friend class StopOperation;
-  friend class ObjectSampleCheckpoint;
-  friend class WriteObjectSampleStacktrace;
  private:
   SamplePriorityQueue* _priority_queue;
   SampleList* _list;
@@ -64,20 +58,11 @@
   ~ObjectSampler();
   static bool create(size_t size);
   static bool is_created();
-  static ObjectSampler* sampler();
   static void destroy();
 
-  // For operations that require exclusive access (non-safepoint)
-  static ObjectSampler* acquire();
-  static void release();
-
-  // Stacktrace
-  static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
-  traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
-
   // Sampling
   static void sample(HeapWord* object, size_t size, JavaThread* thread);
-  void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
+  void add(HeapWord* object, size_t size, traceid thread_id, JavaThread* thread);
   void scavenge();
   void remove_dead(ObjectSample* sample);
 
@@ -87,8 +72,15 @@
   const ObjectSample* item_at(int index) const;
   ObjectSample* item_at(int index);
   int item_count() const;
+
+ public:
+  static ObjectSampler* sampler();
+  // For operations that require exclusive access (non-safepoint)
+  static ObjectSampler* acquire();
+  static void release();
+
   const ObjectSample* first() const;
-  const ObjectSample* last() const;
+  ObjectSample* last() const;
   const ObjectSample* last_resolved() const;
   void set_last_resolved(const ObjectSample* sample);
   const JfrTicks& last_sweep() const;
--- a/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -50,12 +50,12 @@
   SampleList(size_t limit, size_t cache_size = 0);
   ~SampleList();
 
-  void set_last_resolved(const ObjectSample* sample);
   ObjectSample* get();
+  ObjectSample* first() const;
   ObjectSample* last() const;
-  ObjectSample* first() const;
+  const ObjectSample* last_resolved() const;
+  void set_last_resolved(const ObjectSample* sample);
   void release(ObjectSample* sample);
-  const ObjectSample* last_resolved() const;
   ObjectSample* reuse(ObjectSample* sample);
   bool is_full() const;
   size_t count() const;
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-
-JfrCheckpointBlob::JfrCheckpointBlob(const u1* checkpoint, size_t size) :
-  _checkpoint(JfrCHeapObj::new_array<u1>(size)),
-  _size(size),
-  _next(),
-  _written(false) {
-  assert(checkpoint != NULL, "invariant");
-  assert(_checkpoint != NULL, "invariant");
-  memcpy(const_cast<u1*>(_checkpoint), checkpoint, size);
-}
-
-JfrCheckpointBlob::~JfrCheckpointBlob() {
-  JfrCHeapObj::free(const_cast<u1*>(_checkpoint), _size);
-}
-
-const JfrCheckpointBlobHandle& JfrCheckpointBlob::next() const {
-  return _next;
-}
-
-void JfrCheckpointBlob::write_this(JfrCheckpointWriter& writer) const {
-  writer.bytes(_checkpoint, _size);
-}
-
-void JfrCheckpointBlob::exclusive_write(JfrCheckpointWriter& writer) const {
-  if (!_written) {
-    write_this(writer);
-    _written = true;
-  }
-  if (_next.valid()) {
-    _next->exclusive_write(writer);
-  }
-}
-
-void JfrCheckpointBlob::write(JfrCheckpointWriter& writer) const {
-  write_this(writer);
-  if (_next.valid()) {
-    _next->write(writer);
-  }
-}
-
-void JfrCheckpointBlob::reset_write_state() const {
-  if (_written) {
-    _written = false;
-  }
-  if (_next.valid()) {
-    _next->reset_write_state();
-  }
-}
-
-void JfrCheckpointBlob::set_next(const JfrCheckpointBlobHandle& ref) {
-  if (_next == ref) {
-    return;
-  }
-  assert(_next != ref, "invariant");
-  if (_next.valid()) {
-    _next->set_next(ref);
-    return;
-  }
-  _next = ref;
-}
-
-JfrCheckpointBlobHandle JfrCheckpointBlob::make(const u1* checkpoint, size_t size) {
-  const JfrCheckpointBlob* cp_blob = new JfrCheckpointBlob(checkpoint, size);
-  assert(cp_blob != NULL, "invariant");
-  return JfrCheckpointBlobReference::make(cp_blob);
-}
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
-#define SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
-
-#include "jfr/utilities/jfrAllocation.hpp"
-#include "jfr/utilities/jfrRefCountPointer.hpp"
-
-class JfrCheckpointBlob;
-class JfrCheckpointWriter;
-
-typedef RefCountPointer<JfrCheckpointBlob, MultiThreadedRefCounter> JfrCheckpointBlobReference;
-typedef RefCountHandle<JfrCheckpointBlobReference> JfrCheckpointBlobHandle;
-
-class JfrCheckpointBlob : public JfrCHeapObj {
-  template <typename, typename>
-  friend class RefCountPointer;
- private:
-  const u1* _checkpoint;
-  const size_t _size;
-  JfrCheckpointBlobHandle _next;
-  mutable bool _written;
-
-  JfrCheckpointBlob(const u1* checkpoint, size_t size);
-  ~JfrCheckpointBlob();
-  const JfrCheckpointBlobHandle& next() const;
-  void write_this(JfrCheckpointWriter& writer) const;
-
- public:
-  void write(JfrCheckpointWriter& writer) const;
-  void exclusive_write(JfrCheckpointWriter& writer) const;
-  void reset_write_state() const;
-  void set_next(const JfrCheckpointBlobHandle& ref);
-  static JfrCheckpointBlobHandle make(const u1* checkpoint, size_t size);
-};
-
-#endif // SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -37,6 +37,7 @@
 #include "jfr/utilities/jfrTypes.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.inline.hpp"
@@ -87,22 +88,18 @@
 static const size_t checkpoint_buffer_cache_count = 2;
 static const size_t checkpoint_buffer_size = 512 * K;
 
-static JfrCheckpointMspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrCheckpointManager* system) {
-  JfrCheckpointMspace* mspace = new JfrCheckpointMspace(buffer_size, limit, cache_count, system);
-  if (mspace != NULL) {
-    mspace->initialize();
-  }
-  return mspace;
+static JfrCheckpointMspace* allocate_mspace(size_t size, size_t limit, size_t cache_count, JfrCheckpointManager* mgr) {
+  return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(size, limit, cache_count, mgr);
 }
 
 bool JfrCheckpointManager::initialize() {
   assert(_free_list_mspace == NULL, "invariant");
-  _free_list_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
+  _free_list_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
   if (_free_list_mspace == NULL) {
     return false;
   }
   assert(_epoch_transition_mspace == NULL, "invariant");
-  _epoch_transition_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
+  _epoch_transition_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
   if (_epoch_transition_mspace == NULL) {
     return false;
   }
@@ -114,22 +111,6 @@
   return JfrTypeManager::initialize();
 }
 
-bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
-  return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
-}
-
-void JfrCheckpointManager::synchronize_epoch() {
-  assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant");
-  OrderAccess::storestore();
-  _checkpoint_epoch_state = JfrTraceIdEpoch::epoch();
-}
-
-void JfrCheckpointManager::shift_epoch() {
-  debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
-  JfrTraceIdEpoch::shift_epoch();
-  assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
-}
-
 void JfrCheckpointManager::register_service_thread(const Thread* thread) {
   _service_thread = thread;
 }
@@ -151,7 +132,6 @@
 }
 
 #ifdef ASSERT
-
 bool JfrCheckpointManager::is_locked() const {
   return _lock->owned_by_self();
 }
@@ -167,7 +147,6 @@
   assert(buffer->lease(), "invariant");
   assert(buffer->acquired_by_self(), "invariant");
 }
-
 #endif // ASSERT
 
 static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread) {
@@ -185,6 +164,10 @@
   return buffer;
 }
 
+bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
+  return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
+}
+
 static const size_t lease_retry = 10;
 
 BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) {
@@ -256,33 +239,33 @@
   return read_data<juint>(data + types_offset);
 }
 
-static void write_checkpoint_header(JfrChunkWriter& cw, intptr_t offset_prev_cp_event, const u1* data) {
+static void write_checkpoint_header(JfrChunkWriter& cw, int64_t offset_prev_cp_event, const u1* data) {
   cw.reserve(sizeof(u4));
-  cw.write((u8)EVENT_CHECKPOINT);
+  cw.write<u8>(EVENT_CHECKPOINT);
   cw.write(starttime(data));
   cw.write(duration(data));
-  cw.write((jlong)offset_prev_cp_event);
+  cw.write(offset_prev_cp_event);
   cw.write(is_flushpoint(data));
   cw.write(number_of_types(data));
 }
 
 static void write_checkpoint_content(JfrChunkWriter& cw, const u1* data, size_t size) {
   assert(data != NULL, "invariant");
-  cw.write_unbuffered(data + payload_offset, size);
+  cw.write_unbuffered(data + payload_offset, size - sizeof(JfrCheckpointEntry));
 }
 
 static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) {
   assert(data != NULL, "invariant");
-  const intptr_t previous_checkpoint_event = cw.previous_checkpoint_offset();
-  const intptr_t event_begin = cw.current_offset();
-  const intptr_t offset_to_previous_checkpoint_event = 0 == previous_checkpoint_event ? 0 : previous_checkpoint_event - event_begin;
-  const jlong total_checkpoint_size = total_size(data);
-  write_checkpoint_header(cw, offset_to_previous_checkpoint_event, data);
-  write_checkpoint_content(cw, data, total_checkpoint_size - sizeof(JfrCheckpointEntry));
-  const jlong checkpoint_event_size = cw.current_offset() - event_begin;
-  cw.write_padded_at_offset<u4>(checkpoint_event_size, event_begin);
-  cw.set_previous_checkpoint_offset(event_begin);
-  return (size_t)total_checkpoint_size;
+  const int64_t event_begin = cw.current_offset();
+  const int64_t last_checkpoint_event = cw.last_checkpoint_offset();
+  const int64_t delta = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin;
+  const int64_t checkpoint_size = total_size(data);
+  write_checkpoint_header(cw, delta, data);
+  write_checkpoint_content(cw, data, checkpoint_size);
+  const int64_t event_size = cw.current_offset() - event_begin;
+  cw.write_padded_at_offset<u4>(event_size, event_begin);
+  cw.set_last_checkpoint_offset(event_begin);
+  return (size_t)checkpoint_size;
 }
 
 static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) {
@@ -290,14 +273,14 @@
   assert(data != NULL, "invariant");
   assert(size > 0, "invariant");
   const u1* const limit = data + size;
-  const u1* next_entry = data;
+  const u1* next = data;
   size_t processed = 0;
-  while (next_entry < limit) {
-    const size_t checkpoint_size = write_checkpoint_event(cw, next_entry);
+  while (next < limit) {
+    const size_t checkpoint_size = write_checkpoint_event(cw, next);
     processed += checkpoint_size;
-    next_entry += checkpoint_size;
+    next += checkpoint_size;
   }
-  assert(next_entry == limit, "invariant");
+  assert(next == limit, "invariant");
   return processed;
 }
 
@@ -331,6 +314,12 @@
   return wo.processed();
 }
 
+void JfrCheckpointManager::synchronize_epoch() {
+  assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant");
+  OrderAccess::storestore();
+  _checkpoint_epoch_state = JfrTraceIdEpoch::epoch();
+}
+
 size_t JfrCheckpointManager::write() {
   const size_t processed = write_mspace<MutexedWriteOp, CompositeOperation>(_free_list_mspace, _chunkwriter);
   synchronize_epoch();
@@ -372,10 +361,16 @@
   JfrTypeManager::write_type_set_for_unloaded_classes();
 }
 
-void JfrCheckpointManager::create_thread_checkpoint(JavaThread* jt) {
-  JfrTypeManager::create_thread_checkpoint(jt);
+void JfrCheckpointManager::create_thread_blob(JavaThread* jt) {
+  JfrTypeManager::create_thread_blob(jt);
 }
 
 void JfrCheckpointManager::write_thread_checkpoint(JavaThread* jt) {
   JfrTypeManager::write_thread_checkpoint(jt);
 }
+
+void JfrCheckpointManager::shift_epoch() {
+  debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
+  JfrTraceIdEpoch::shift_epoch();
+  assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
+}
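
write_checkpoint_event() above follows the self-describing event layout: reserve a four-byte size slot at the event start, emit the header and payload, patch the total event size back at event_begin, and remember event_begin so the next checkpoint can link to this one through a non-positive delta. A toy illustration of the reserve-then-patch idea; it ignores the compressed integers and padding that the real JfrChunkWriter uses:

#include <cstdint>
#include <cstring>
#include <vector>

// Toy chunk writer: just enough state to show reserve-then-patch and the
// back link (delta) to the previous checkpoint event.
struct ToyChunkWriter {
  std::vector<uint8_t> buf;
  int64_t last_checkpoint = 0;
  int64_t current_offset() const { return (int64_t)buf.size(); }
  void write_u4(uint32_t v) { const uint8_t* p = (const uint8_t*)&v; buf.insert(buf.end(), p, p + 4); }
  void write_u8(uint64_t v) { const uint8_t* p = (const uint8_t*)&v; buf.insert(buf.end(), p, p + 8); }
  void patch_u4_at(uint32_t v, int64_t offset) { std::memcpy(&buf[(size_t)offset], &v, 4); }
};

static void write_event(ToyChunkWriter& cw, const uint8_t* payload, size_t size) {
  const int64_t event_begin = cw.current_offset();
  const int64_t delta = cw.last_checkpoint == 0 ? 0 : cw.last_checkpoint - event_begin; // non-positive
  cw.write_u4(0);                               // reserve the size slot
  cw.write_u8(/* EVENT_CHECKPOINT */ 1);        // event type
  cw.write_u8((uint64_t)delta);                 // link back to the previous checkpoint event
  cw.buf.insert(cw.buf.end(), payload, payload + size);
  cw.patch_u4_at((uint32_t)(cw.current_offset() - event_begin), event_begin); // patch total size
  cw.last_checkpoint = event_begin;
}

int main() {
  ToyChunkWriter cw;
  const uint8_t payload[] = { 1, 2, 3 };
  write_event(cw, payload, sizeof(payload));
  write_event(cw, payload, sizeof(payload));    // second event records a negative delta to the first
  return 0;
}
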
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -92,7 +92,7 @@
  public:
   void register_service_thread(const Thread* t);
   static void write_type_set_for_unloaded_classes();
-  static void create_thread_checkpoint(JavaThread* jt);
+  static void create_thread_blob(JavaThread* jt);
   static void write_thread_checkpoint(JavaThread* jt);
 
   friend class JfrRecorder;
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
 #include "jfr/writers/jfrBigEndianWriter.hpp"
 
 JfrCheckpointFlush::JfrCheckpointFlush(Type* old, size_t used, size_t requested, Thread* t) :
@@ -126,7 +127,7 @@
   write_padded_at_offset(nof_entries, offset);
 }
 
-const u1* JfrCheckpointWriter::session_data(size_t* size, const JfrCheckpointContext* ctx /* 0 */) {
+const u1* JfrCheckpointWriter::session_data(size_t* size, bool move /* false */, const JfrCheckpointContext* ctx /* 0 */) {
   assert(this->is_acquired(), "wrong state!");
   if (!this->is_valid()) {
     *size = 0;
@@ -140,8 +141,10 @@
   *size = this->used_size();
   assert(this->start_pos() + *size == this->current_pos(), "invariant");
   write_checkpoint_header(const_cast<u1*>(this->start_pos()), this->used_offset(), _time, is_flushpoint(), count());
-  this->seek(_offset + (_header ? sizeof(JfrCheckpointEntry) : 0));
-  set_count(0);
+  _header = false; // the header was just written
+  if (move) {
+    this->seek(_offset);
+  }
   return this->start_pos();
 }
 
@@ -160,26 +163,19 @@
   return this->used_size() > sizeof(JfrCheckpointEntry);
 }
 
-JfrCheckpointBlobHandle JfrCheckpointWriter::checkpoint_blob() {
+JfrBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
   size_t size = 0;
-  const u1* data = session_data(&size);
-  return JfrCheckpointBlob::make(data, size);
+  const u1* data = session_data(&size, false, ctx);
+  return JfrBlob::make(data, size);
 }
 
-JfrCheckpointBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
-  if (ctx == NULL) {
-    return checkpoint_blob();
-  }
+JfrBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) {
   size_t size = 0;
-  const u1* data = session_data(&size, ctx);
-  return JfrCheckpointBlob::make(data, size);
-}
-
-JfrCheckpointBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) {
-  JfrCheckpointBlobHandle data = copy(ctx);
+  const u1* data = session_data(&size, true, ctx);
+  JfrBlobHandle blob = JfrBlob::make(data, size);
   if (ctx != NULL) {
     const_cast<JfrCheckpointContext*>(ctx)->count = 0;
     set_context(*ctx);
   }
-  return data;
+  return blob;
 }
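
The copy()/move() split above: both snapshot the current session bytes into a JfrBlobHandle, but move() passes move=true to session_data(), which seeks the writer back to _offset so the relinquished bytes are not also flushed through the normal path. Roughly, with hypothetical stand-in types rather than the real writer API:

#include <cstdint>
#include <vector>

// copy_blob() leaves the writer position untouched; move_blob() additionally
// rewinds to the session start so the same bytes are not emitted twice.
struct ToyCheckpointWriter {
  std::vector<uint8_t> buf;
  size_t session_start = 0;   // plays the role of _offset in JfrCheckpointWriter

  std::vector<uint8_t> copy_blob() const {
    return std::vector<uint8_t>(buf.begin() + session_start, buf.end());
  }
  std::vector<uint8_t> move_blob() {
    std::vector<uint8_t> blob(buf.begin() + session_start, buf.end());
    buf.resize(session_start);  // "seek(_offset)": the writer no longer owns these bytes
    return blob;
  }
};
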
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -25,8 +25,8 @@
 #ifndef SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
 #define SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
 
-#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
 #include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "jfr/writers/jfrEventWriterHost.inline.hpp"
@@ -67,9 +67,8 @@
   void increment();
   void set_flushpoint(bool flushpoint);
   bool is_flushpoint() const;
-  const u1* session_data(size_t* size, const JfrCheckpointContext* ctx = NULL);
+  const u1* session_data(size_t* size, bool move = false, const JfrCheckpointContext* ctx = NULL);
   void release();
-
  public:
   JfrCheckpointWriter(bool flushpoint, bool header, Thread* thread);
   ~JfrCheckpointWriter();
@@ -80,9 +79,8 @@
   const JfrCheckpointContext context() const;
   void set_context(const JfrCheckpointContext ctx);
   bool has_data() const;
-  JfrCheckpointBlobHandle checkpoint_blob();
-  JfrCheckpointBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
-  JfrCheckpointBlobHandle move(const JfrCheckpointContext* ctx = NULL);
+  JfrBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
+  JfrBlobHandle move(const JfrCheckpointContext* ctx = NULL);
 };
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -30,7 +30,6 @@
 #include "gc/shared/gcName.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcWhen.hpp"
-#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/types/jfrType.hpp"
@@ -274,34 +273,26 @@
 
 class TypeSetSerialization {
  private:
+  JfrCheckpointWriter* _leakp_writer;
   bool _class_unload;
  public:
-  explicit TypeSetSerialization(bool class_unload) : _class_unload(class_unload) {}
-  void write(JfrCheckpointWriter& writer, JfrCheckpointWriter* leakp_writer) {
-    JfrTypeSet::serialize(&writer, leakp_writer, _class_unload);
+  TypeSetSerialization(bool class_unload, JfrCheckpointWriter* leakp_writer = NULL) :
+    _leakp_writer(leakp_writer), _class_unload(class_unload) {}
+  void write(JfrCheckpointWriter& writer) {
+    JfrTypeSet::serialize(&writer, _leakp_writer, _class_unload);
   }
 };
 
 void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
   TypeSetSerialization type_set(true);
-  if (LeakProfiler::is_running()) {
-    JfrCheckpointWriter leakp_writer(false, true, Thread::current());
-    type_set.write(writer, &leakp_writer);
-    ObjectSampleCheckpoint::install(leakp_writer, true, true);
-    return;
-  }
-  type_set.write(writer, NULL);
+  type_set.write(writer);
 };
 
+TypeSet::TypeSet(JfrCheckpointWriter* leakp_writer) : _leakp_writer(leakp_writer) {}
+
 void TypeSet::serialize(JfrCheckpointWriter& writer) {
-  TypeSetSerialization type_set(false);
-  if (LeakProfiler::is_running()) {
-    JfrCheckpointWriter leakp_writer(false, true, Thread::current());
-    type_set.write(writer, &leakp_writer);
-    ObjectSampleCheckpoint::install(leakp_writer, false, true);
-    return;
-  }
-  type_set.write(writer, NULL);
+  TypeSetSerialization type_set(false, _leakp_writer);
+  type_set.write(writer);
 };
 
 void ThreadStateConstant::serialize(JfrCheckpointWriter& writer) {
@@ -312,7 +303,6 @@
   assert(_thread != NULL, "invariant");
   assert(_thread == Thread::current(), "invariant");
   assert(_thread->is_Java_thread(), "invariant");
-  assert(!_thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
   ResourceMark rm(_thread);
   const oop threadObj = _thread->threadObj();
   assert(threadObj != NULL, "invariant");
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -108,7 +108,10 @@
 };
 
 class TypeSet : public JfrSerializer {
+ private:
+  JfrCheckpointWriter* _leakp_writer;
  public:
+  explicit TypeSet(JfrCheckpointWriter* leakp_writer = NULL);
   void serialize(JfrCheckpointWriter& writer);
 };
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -23,12 +23,17 @@
  */
 
 #include "precompiled.hpp"
+#include "jfr/jfr.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/metadata/jfrSerializer.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
 #include "jfr/recorder/checkpoint/types/jfrType.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeManager.hpp"
 #include "jfr/utilities/jfrDoublyLinkedList.hpp"
 #include "jfr/utilities/jfrIterator.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/exceptions.hpp"
@@ -39,7 +44,7 @@
   JfrSerializerRegistration* _next;
   JfrSerializerRegistration* _prev;
   JfrSerializer* _serializer;
-  mutable JfrCheckpointBlobHandle _cache;
+  mutable JfrBlobHandle _cache;
   JfrTypeId _id;
   bool _permit_cache;
 
@@ -148,45 +153,59 @@
 }
 
 void JfrTypeManager::write_type_set() {
-  // can safepoint here because of Module_lock
-  MutexLocker cld_lock(SafepointSynchronize::is_at_safepoint() ? NULL : ClassLoaderDataGraph_lock);
-  MutexLocker lock(SafepointSynchronize::is_at_safepoint() ? NULL : Module_lock);
-
-  JfrCheckpointWriter writer(true, true, Thread::current());
-  TypeSet set;
+  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
+  // can safepoint here
+  MutexLocker cld_lock(ClassLoaderDataGraph_lock);
+  MutexLocker module_lock(Module_lock);
+  if (!LeakProfiler::is_running()) {
+    JfrCheckpointWriter writer(true, true, Thread::current());
+    TypeSet set;
+    set.serialize(writer);
+    return;
+  }
+  JfrCheckpointWriter leakp_writer(false, true, Thread::current());
+  JfrCheckpointWriter writer(false, true, Thread::current());
+  TypeSet set(&leakp_writer);
   set.serialize(writer);
+  ObjectSampleCheckpoint::on_type_set(leakp_writer);
 }
 
 void JfrTypeManager::write_type_set_for_unloaded_classes() {
   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   JfrCheckpointWriter writer(false, true, Thread::current());
+  const JfrCheckpointContext ctx = writer.context();
   ClassUnloadTypeSet class_unload_set;
   class_unload_set.serialize(writer);
+  if (LeakProfiler::is_running()) {
+    ObjectSampleCheckpoint::on_type_set_unload(writer);
+  }
+  if (!Jfr::is_recording()) {
+    // discard anything written
+    writer.set_context(ctx);
+  }
 }
 
-void JfrTypeManager::create_thread_checkpoint(JavaThread* jt) {
+void JfrTypeManager::create_thread_blob(JavaThread* jt) {
   assert(jt != NULL, "invariant");
+  ResourceMark rm(jt);
+  HandleMark hm(jt);
   JfrThreadConstant type_thread(jt);
   JfrCheckpointWriter writer(false, true, jt);
   writer.write_type(TYPE_THREAD);
   type_thread.serialize(writer);
   // create and install a checkpoint blob
-  jt->jfr_thread_local()->set_thread_checkpoint(writer.checkpoint_blob());
-  assert(jt->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+  jt->jfr_thread_local()->set_thread_blob(writer.move());
+  assert(jt->jfr_thread_local()->has_thread_blob(), "invariant");
 }
 
 void JfrTypeManager::write_thread_checkpoint(JavaThread* jt) {
   assert(jt != NULL, "JavaThread is NULL!");
   ResourceMark rm(jt);
-  if (jt->jfr_thread_local()->has_thread_checkpoint()) {
-    JfrCheckpointWriter writer(false, false, jt);
-    jt->jfr_thread_local()->thread_checkpoint()->write(writer);
-  } else {
-    JfrThreadConstant type_thread(jt);
-    JfrCheckpointWriter writer(false, true, jt);
-    writer.write_type(TYPE_THREAD);
-    type_thread.serialize(writer);
-  }
+  HandleMark hm(jt);
+  JfrThreadConstant type_thread(jt);
+  JfrCheckpointWriter writer(false, true, jt);
+  writer.write_type(TYPE_THREAD);
+  type_thread.serialize(writer);
 }
 
 #ifdef ASSERT
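
write_type_set_for_unloaded_classes() now snapshots the writer context before serializing and restores it when Jfr::is_recording() is false, so type-set data produced only for the leak profiler's benefit is dropped rather than flushed. The same save-and-rewind idea in miniature, with a toy writer (illustrative names, not the JfrCheckpointWriter API):

#include <cstddef>
#include <vector>

// Capture (offset, count) before writing; restoring the context discards
// everything written since the snapshot.
struct ToyWriter {
  std::vector<char> buf;
  int count = 0;
  struct Context { size_t offset; int count; };
  Context context() const { return { buf.size(), count }; }
  void set_context(const Context& ctx) { buf.resize(ctx.offset); count = ctx.count; }
  void write(char c) { buf.push_back(c); ++count; }
};

static void serialize_conditionally(ToyWriter& w, bool keep) {
  const ToyWriter::Context ctx = w.context(); // snapshot before serializing
  w.write('A'); w.write('B');                 // ... type set serialization ...
  if (!keep) {
    w.set_context(ctx);                       // discard anything written
  }
}
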
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -37,7 +37,7 @@
   static void write_safepoint_types(JfrCheckpointWriter& writer);
   static void write_type_set();
   static void write_type_set_for_unloaded_classes();
-  static void create_thread_checkpoint(JavaThread* jt);
+  static void create_thread_blob(JavaThread* jt);
   static void write_thread_checkpoint(JavaThread* jt);
 };
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,32 +28,22 @@
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
 #include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
 #include "jfr/jfr.hpp"
 #include "jfr/jni/jfrGetAllEventClasses.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
-#include "jfr/recorder/storage/jfrBuffer.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/writers/jfrTypeWriterHost.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
-#include "memory/resourceArea.hpp"
 #include "utilities/accessFlags.hpp"
 
-// incremented on each checkpoint
-static u8 checkpoint_id = 0;
-
-// creates a unique id by combining a checkpoint relative symbol id (2^24)
-// with the current checkpoint id (2^40)
-#define CREATE_SYMBOL_ID(sym_id) (((u8)((checkpoint_id << 24) | sym_id)))
-
 typedef const Klass* KlassPtr;
 typedef const PackageEntry* PkgPtr;
 typedef const ModuleEntry* ModPtr;
@@ -63,57 +53,112 @@
 typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
 typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
 
-static traceid module_id(PkgPtr pkg) {
-  assert(pkg != NULL, "invariant");
-  ModPtr module_entry = pkg->module();
-  return module_entry != NULL && module_entry->is_named() ? TRACE_ID(module_entry) : 0;
+static JfrCheckpointWriter* _writer = NULL;
+static JfrCheckpointWriter* _leakp_writer = NULL;
+static JfrArtifactSet* _artifacts = NULL;
+static JfrArtifactClosure* _subsystem_callback = NULL;
+static bool _class_unload = false;
+static bool _flushpoint = false;
+
+// incremented on each rotation
+static u8 checkpoint_id = 1;
+
+// creates a unique id by combining a checkpoint relative symbol id (2^24)
+// with the current checkpoint id (2^40)
+#define CREATE_SYMBOL_ID(sym_id) (((u8)((checkpoint_id << 24) | sym_id)))
+
+static traceid create_symbol_id(traceid artifact_id) {
+  return artifact_id != 0 ? CREATE_SYMBOL_ID(artifact_id) : 0;
+}
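
CREATE_SYMBOL_ID packs the rotation-relative symbol id into the low 24 bits and the rotation (checkpoint) id into the bits above it, so symbol ids remain unique across chunk rotations without any shared table. A plain C++ example mirroring the macro above:

#include <cassert>
#include <cstdint>

static uint64_t compose_symbol_id(uint64_t checkpoint_id, uint64_t sym_id) {
  return sym_id != 0 ? ((checkpoint_id << 24) | sym_id) : 0;
}

int main() {
  const uint64_t id = compose_symbol_id(/* checkpoint_id */ 3, /* sym_id */ 0x42);
  assert(id == (((uint64_t)3 << 24) | 0x42)); // 0x0000000003000042
  assert((id & 0xFFFFFF) == 0x42);            // low 24 bits: symbol id within this rotation
  assert((id >> 24) == 3);                    // upper bits: rotation id, bumped per checkpoint
  return 0;
}
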
+
+static bool current_epoch() {
+  return _class_unload;
 }
 
-static traceid package_id(KlassPtr klass) {
-  assert(klass != NULL, "invariant");
-  PkgPtr pkg_entry = klass->package();
-  return pkg_entry == NULL ? 0 : TRACE_ID(pkg_entry);
+static bool previous_epoch() {
+  return !current_epoch();
+}
+
+static bool is_complete() {
+  return !_artifacts->has_klass_entries() && current_epoch();
+}
+
+static traceid mark_symbol(KlassPtr klass, bool leakp) {
+  return klass != NULL ? create_symbol_id(_artifacts->mark(klass, leakp)) : 0;
 }
 
-static traceid cld_id(CldPtr cld) {
-  assert(cld != NULL, "invariant");
-  return cld->is_unsafe_anonymous() ? 0 : TRACE_ID(cld);
+static traceid mark_symbol(Symbol* symbol, bool leakp) {
+  return symbol != NULL ? create_symbol_id(_artifacts->mark(symbol, leakp)) : 0;
+}
+
+static traceid get_bootstrap_name(bool leakp) {
+  return create_symbol_id(_artifacts->bootstrap_name(leakp));
+}
+
+template <typename T>
+static traceid artifact_id(const T* ptr) {
+  assert(ptr != NULL, "invariant");
+  return TRACE_ID(ptr);
 }
 
-static void tag_leakp_klass_artifacts(KlassPtr k, bool class_unload) {
-  assert(k != NULL, "invariant");
-  PkgPtr pkg = k->package();
-  if (pkg != NULL) {
-    tag_leakp_artifact(pkg, class_unload);
-    ModPtr module = pkg->module();
-    if (module != NULL) {
-      tag_leakp_artifact(module, class_unload);
-    }
+static traceid package_id(KlassPtr klass, bool leakp) {
+  assert(klass != NULL, "invariant");
+  PkgPtr pkg_entry = klass->package();
+  if (pkg_entry == NULL) {
+    return 0;
+  }
+  if (leakp) {
+    SET_LEAKP(pkg_entry);
   }
-  CldPtr cld = k->class_loader_data();
-  assert(cld != NULL, "invariant");
-  if (!cld->is_unsafe_anonymous()) {
-    tag_leakp_artifact(cld, class_unload);
+  // package implicitly tagged already
+  return artifact_id(pkg_entry);
+}
+
+static traceid module_id(PkgPtr pkg, bool leakp) {
+  assert(pkg != NULL, "invariant");
+  ModPtr module_entry = pkg->module();
+  if (module_entry == NULL || !module_entry->is_named()) {
+    return 0;
   }
+  if (leakp) {
+    SET_LEAKP(module_entry);
+  } else {
+    SET_TRANSIENT(module_entry);
+  }
+  return artifact_id(module_entry);
 }
 
-class TagLeakpKlassArtifact {
-  bool _class_unload;
- public:
-  TagLeakpKlassArtifact(bool class_unload) : _class_unload(class_unload) {}
-  bool operator()(KlassPtr klass) {
-    if (_class_unload) {
-      if (LEAKP_USED_THIS_EPOCH(klass)) {
-        tag_leakp_klass_artifacts(klass, _class_unload);
-      }
-    } else {
-      if (LEAKP_USED_PREV_EPOCH(klass)) {
-        tag_leakp_klass_artifacts(klass, _class_unload);
-      }
-    }
-    return true;
+static traceid method_id(KlassPtr klass, MethodPtr method) {
+  assert(klass != NULL, "invariant");
+  assert(method != NULL, "invariant");
+  return METHOD_ID(klass, method);
+}
+
+static traceid cld_id(CldPtr cld, bool leakp) {
+  assert(cld != NULL, "invariant");
+  if (cld->is_unsafe_anonymous()) {
+    return 0;
+  }
+  if (leakp) {
+    SET_LEAKP(cld);
+  } else {
+    SET_TRANSIENT(cld);
   }
-};
+  return artifact_id(cld);
+}
+
+template <typename T>
+static s4 get_flags(const T* ptr) {
+  assert(ptr != NULL, "invariant");
+  return ptr->access_flags().get_flags();
+}
+
+template <typename T>
+static void set_serialized(const T* ptr) {
+  assert(ptr != NULL, "invariant");
+  SET_SERIALIZED(ptr);
+  assert(IS_SERIALIZED(ptr), "invariant");
+}
 
 /*
  * In C++03, functions used as template parameters must have external linkage;
@@ -123,11 +168,10 @@
  * The weird naming is an effort to decrease the risk of name clashes.
  */
 
-int write__artifact__klass(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
+static int write_klass(JfrCheckpointWriter* writer, KlassPtr klass, bool leakp) {
   assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  assert(k != NULL, "invariant");
-  KlassPtr klass = (KlassPtr)k;
+  assert(_artifacts != NULL, "invariant");
+  assert(klass != NULL, "invariant");
   traceid pkg_id = 0;
   KlassPtr theklass = klass;
   if (theklass->is_objArray_klass()) {
@@ -135,445 +179,167 @@
     theklass = obj_arr_klass->bottom_klass();
   }
   if (theklass->is_instance_klass()) {
-    pkg_id = package_id(theklass);
+    pkg_id = package_id(theklass, leakp);
   } else {
     assert(theklass->is_typeArray_klass(), "invariant");
   }
-  const traceid symbol_id = artifacts->mark(klass);
-  assert(symbol_id > 0, "need to have an address for symbol!");
-  writer->write(TRACE_ID(klass));
-  writer->write(cld_id(klass->class_loader_data()));
-  writer->write((traceid)CREATE_SYMBOL_ID(symbol_id));
+  writer->write(artifact_id(klass));
+  writer->write(cld_id(klass->class_loader_data(), leakp));
+  writer->write(mark_symbol(klass, leakp));
   writer->write(pkg_id);
-  writer->write((s4)klass->access_flags().get_flags());
+  writer->write(get_flags(klass));
   return 1;
 }
 
-typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
-typedef JfrPredicatedArtifactWriterImplHost<KlassPtr, LeakKlassPredicate, write__artifact__klass> LeakKlassWriterImpl;
-typedef JfrArtifactWriterHost<LeakKlassWriterImpl, TYPE_CLASS> LeakKlassWriter;
-typedef JfrArtifactWriterImplHost<KlassPtr, write__artifact__klass> KlassWriterImpl;
-typedef JfrArtifactWriterHost<KlassWriterImpl, TYPE_CLASS> KlassWriter;
+int write__klass(JfrCheckpointWriter* writer, const void* k) {
+  assert(k != NULL, "invariant");
+  KlassPtr klass = (KlassPtr)k;
+  set_serialized(klass);
+  return write_klass(writer, klass, false);
+}
+
+int write__klass__leakp(JfrCheckpointWriter* writer, const void* k) {
+  assert(k != NULL, "invariant");
+  KlassPtr klass = (KlassPtr)k;
+  return write_klass(writer, klass, true);
+}
+
+static void do_implied(Klass* klass) {
+  assert(klass != NULL, "invariant");
+  if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
+    if (_leakp_writer != NULL) {
+      SET_LEAKP(klass);
+    }
+    _subsystem_callback->do_artifact(klass);
+  }
+}
 
-int write__artifact__method(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
-  assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  assert(m != NULL, "invariant");
-  MethodPtr method = (MethodPtr)m;
-  const traceid method_name_symbol_id = artifacts->mark(method->name());
-  assert(method_name_symbol_id > 0, "invariant");
-  const traceid method_sig_symbol_id = artifacts->mark(method->signature());
-  assert(method_sig_symbol_id > 0, "invariant");
-  KlassPtr klass = method->method_holder();
+static void do_unloaded_klass(Klass* klass) {
+  assert(klass != NULL, "invariant");
+  assert(_subsystem_callback != NULL, "invariant");
+  if (IS_JDK_JFR_EVENT_SUBKLASS(klass)) {
+    JfrEventClasses::increment_unloaded_event_class();
+  }
+  if (USED_THIS_EPOCH(klass)) {
+    ObjectSampleCheckpoint::on_klass_unload(klass);
+    _subsystem_callback->do_artifact(klass);
+    return;
+  }
+  do_implied(klass);
+}
+
+static void do_klass(Klass* klass) {
   assert(klass != NULL, "invariant");
-  assert(METHOD_USED_ANY_EPOCH(klass), "invariant");
-  writer->write((u8)METHOD_ID(klass, method));
-  writer->write((u8)TRACE_ID(klass));
-  writer->write((u8)CREATE_SYMBOL_ID(method_name_symbol_id));
-  writer->write((u8)CREATE_SYMBOL_ID(method_sig_symbol_id));
-  writer->write((u2)method->access_flags().get_flags());
-  writer->write(const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0);
-  return 1;
+  assert(_subsystem_callback != NULL, "invariant");
+  if (current_epoch()) {
+    if (USED_THIS_EPOCH(klass)) {
+      _subsystem_callback->do_artifact(klass);
+      return;
+    }
+  } else {
+    if (USED_PREV_EPOCH(klass)) {
+      _subsystem_callback->do_artifact(klass);
+      return;
+    }
+  }
+  do_implied(klass);
+}
+
+static void do_klasses() {
+  if (_class_unload) {
+    ClassLoaderDataGraph::classes_unloading_do(&do_unloaded_klass);
+    return;
+  }
+  ClassLoaderDataGraph::classes_do(&do_klass);
 }
 
-typedef JfrArtifactWriterImplHost<MethodPtr, write__artifact__method> MethodWriterImplTarget;
-typedef JfrArtifactWriterHost<MethodWriterImplTarget, TYPE_METHOD> MethodWriterImpl;
+typedef SerializePredicate<KlassPtr> KlassPredicate;
+typedef JfrPredicatedTypeWriterImplHost<KlassPtr, KlassPredicate, write__klass> KlassWriterImpl;
+typedef JfrTypeWriterHost<KlassWriterImpl, TYPE_CLASS> KlassWriter;
+typedef CompositeFunctor<KlassPtr, KlassWriter, KlassArtifactRegistrator> KlassWriterRegistration;
+typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
+
+typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
+typedef JfrPredicatedTypeWriterImplHost<KlassPtr, LeakKlassPredicate, write__klass__leakp> LeakKlassWriterImpl;
+typedef JfrTypeWriterHost<LeakKlassWriterImpl, TYPE_CLASS> LeakKlassWriter;
+
+typedef CompositeFunctor<KlassPtr, LeakKlassWriter, KlassWriter> CompositeKlassWriter;
+typedef CompositeFunctor<KlassPtr, CompositeKlassWriter, KlassArtifactRegistrator> CompositeKlassWriterRegistration;
+typedef JfrArtifactCallbackHost<KlassPtr, CompositeKlassWriterRegistration> CompositeKlassCallback;
 
-int write__artifact__package(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* p) {
+static bool write_klasses() {
+  assert(!_artifacts->has_klass_entries(), "invariant");
+  assert(_writer != NULL, "invariant");
+  KlassArtifactRegistrator reg(_artifacts);
+  KlassWriter kw(_writer, _class_unload);
+  KlassWriterRegistration kwr(&kw, &reg);
+  if (_leakp_writer == NULL) {
+    KlassCallback callback(&kwr);
+    _subsystem_callback = &callback;
+    do_klasses();
+  } else {
+    LeakKlassWriter lkw(_leakp_writer, _artifacts, _class_unload);
+    CompositeKlassWriter ckw(&lkw, &kw);
+    CompositeKlassWriterRegistration ckwr(&ckw, &reg);
+    CompositeKlassCallback callback(&ckwr);
+    _subsystem_callback = &callback;
+    do_klasses();
+  }
+  if (is_complete()) {
+    return false;
+  }
+  _artifacts->tally(kw);
+  return true;
+}
+
+template <typename T>
+static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
+  assert(callback != NULL, "invariant");
+  assert(value != NULL, "invariant");
+  if (USED_PREV_EPOCH(value)) {
+    callback->do_artifact(value);
+    assert(IS_NOT_SERIALIZED(value), "invariant");
+    return;
+  }
+  if (IS_SERIALIZED(value)) {
+    CLEAR_SERIALIZED(value);
+  }
+  assert(IS_NOT_SERIALIZED(value), "invariant");
+}
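
do_previous_epoch_artifact() captures the rotation protocol for tagged artifacts: anything used in the previous epoch is handed to the callback for serialization, and anything else has a stale SERIALIZED marker cleared. A toy model of the tag bits; the names and layout are illustrative only, not the actual JfrTraceIdBits scheme:

#include <cstdint>

enum : uint8_t { USED_EPOCH_0 = 1, USED_EPOCH_1 = 2, SERIALIZED = 4 };

struct Artifact { uint8_t tags = 0; };

static bool used_in(const Artifact& a, int epoch) {
  return (a.tags & (epoch == 0 ? USED_EPOCH_0 : USED_EPOCH_1)) != 0;
}

// Rotation over the previous epoch: serialize what was used there,
// otherwise make sure any stale SERIALIZED marker is cleared.
static void do_previous_epoch(Artifact& a, int previous_epoch) {
  if (used_in(a, previous_epoch)) {
    // callback->do_artifact(&a): written out, SERIALIZED set elsewhere
    return;
  }
  a.tags &= (uint8_t)~SERIALIZED;
}
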
+
+static int write_package(JfrCheckpointWriter* writer, PkgPtr pkg, bool leakp) {
   assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  assert(p != NULL, "invariant");
-  PkgPtr pkg = (PkgPtr)p;
-  Symbol* const pkg_name = pkg->name();
-  const traceid package_name_symbol_id = pkg_name != NULL ? artifacts->mark(pkg_name) : 0;
-  assert(package_name_symbol_id > 0, "invariant");
-  writer->write((traceid)TRACE_ID(pkg));
-  writer->write((traceid)CREATE_SYMBOL_ID(package_name_symbol_id));
-  writer->write(module_id(pkg));
+  assert(_artifacts != NULL, "invariant");
+  assert(pkg != NULL, "invariant");
+  writer->write(artifact_id(pkg));
+  writer->write(mark_symbol(pkg->name(), leakp));
+  writer->write(module_id(pkg, leakp));
   writer->write((bool)pkg->is_exported());
   return 1;
 }
 
-typedef LeakPredicate<PkgPtr> LeakPackagePredicate;
-int _compare_pkg_ptr_(PkgPtr const& lhs, PkgPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
-typedef UniquePredicate<PkgPtr, _compare_pkg_ptr_> PackagePredicate;
-typedef JfrPredicatedArtifactWriterImplHost<PkgPtr, LeakPackagePredicate, write__artifact__package> LeakPackageWriterImpl;
-typedef JfrPredicatedArtifactWriterImplHost<PkgPtr, PackagePredicate, write__artifact__package> PackageWriterImpl;
-typedef JfrArtifactWriterHost<LeakPackageWriterImpl, TYPE_PACKAGE> LeakPackageWriter;
-typedef JfrArtifactWriterHost<PackageWriterImpl, TYPE_PACKAGE> PackageWriter;
-
-int write__artifact__module(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* m) {
-  assert( m != NULL, "invariant");
-  ModPtr entry = (ModPtr)m;
-  Symbol* const module_name = entry->name();
-  const traceid module_name_symbol_id = module_name != NULL ? artifacts->mark(module_name) : 0;
-  Symbol* const module_version = entry->version();
-  const traceid module_version_symbol_id = module_version != NULL ? artifacts->mark(module_version) : 0;
-  Symbol* const module_location = entry->location();
-  const traceid module_location_symbol_id = module_location != NULL ? artifacts->mark(module_location) : 0;
-  writer->write((traceid)TRACE_ID(entry));
-  writer->write(module_name_symbol_id == 0 ? (traceid)0 : (traceid)CREATE_SYMBOL_ID(module_name_symbol_id));
-  writer->write(module_version_symbol_id == 0 ? (traceid)0 : (traceid)CREATE_SYMBOL_ID(module_version_symbol_id));
-  writer->write(module_location_symbol_id == 0 ? (traceid)0 : (traceid)CREATE_SYMBOL_ID(module_location_symbol_id));
-  writer->write(cld_id(entry->loader_data()));
-  return 1;
-}
-
-typedef LeakPredicate<ModPtr> LeakModulePredicate;
-int _compare_mod_ptr_(ModPtr const& lhs, ModPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
-typedef UniquePredicate<ModPtr, _compare_mod_ptr_> ModulePredicate;
-typedef JfrPredicatedArtifactWriterImplHost<ModPtr, LeakModulePredicate, write__artifact__module> LeakModuleWriterImpl;
-typedef JfrPredicatedArtifactWriterImplHost<ModPtr, ModulePredicate, write__artifact__module> ModuleWriterImpl;
-typedef JfrArtifactWriterHost<LeakModuleWriterImpl, TYPE_MODULE> LeakModuleWriter;
-typedef JfrArtifactWriterHost<ModuleWriterImpl, TYPE_MODULE> ModuleWriter;
-
-int write__artifact__classloader(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* c) {
-  assert(c != NULL, "invariant");
-  CldPtr cld = (CldPtr)c;
-  assert(!cld->is_unsafe_anonymous(), "invariant");
-  const traceid cld_id = TRACE_ID(cld);
-  // class loader type
-  const Klass* class_loader_klass = cld->class_loader_klass();
-  if (class_loader_klass == NULL) {
-    // (primordial) boot class loader
-    writer->write(cld_id); // class loader instance id
-    writer->write((traceid)0);  // class loader type id (absence of)
-    writer->write((traceid)CREATE_SYMBOL_ID(1)); // 1 maps to synthetic name -> "bootstrap"
-  } else {
-    Symbol* symbol_name = cld->name();
-    const traceid symbol_name_id = symbol_name != NULL ? artifacts->mark(symbol_name) : 0;
-    writer->write(cld_id); // class loader instance id
-    writer->write(TRACE_ID(class_loader_klass)); // class loader type id
-    writer->write(symbol_name_id == 0 ? (traceid)0 :
-      (traceid)CREATE_SYMBOL_ID(symbol_name_id)); // class loader instance name
-  }
-  return 1;
-}
-
-typedef LeakPredicate<CldPtr> LeakCldPredicate;
-int _compare_cld_ptr_(CldPtr const& lhs, CldPtr const& rhs) { return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0; }
-typedef UniquePredicate<CldPtr, _compare_cld_ptr_> CldPredicate;
-typedef JfrPredicatedArtifactWriterImplHost<CldPtr, LeakCldPredicate, write__artifact__classloader> LeakCldWriterImpl;
-typedef JfrPredicatedArtifactWriterImplHost<CldPtr, CldPredicate, write__artifact__classloader> CldWriterImpl;
-typedef JfrArtifactWriterHost<LeakCldWriterImpl, TYPE_CLASSLOADER> LeakCldWriter;
-typedef JfrArtifactWriterHost<CldWriterImpl, TYPE_CLASSLOADER> CldWriter;
-
-typedef const JfrSymbolId::SymbolEntry* SymbolEntryPtr;
-
-static int write__artifact__symbol__entry__(JfrCheckpointWriter* writer,
-                                            SymbolEntryPtr entry) {
-  assert(writer != NULL, "invariant");
-  assert(entry != NULL, "invariant");
-  ResourceMark rm;
-  writer->write(CREATE_SYMBOL_ID(entry->id()));
-  writer->write(entry->value()->as_C_string());
-  return 1;
-}
-
-int write__artifact__symbol__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
-  assert(e != NULL, "invariant");
-  return write__artifact__symbol__entry__(writer, (SymbolEntryPtr)e);
-}
-
-typedef JfrArtifactWriterImplHost<SymbolEntryPtr, write__artifact__symbol__entry> SymbolEntryWriterImpl;
-typedef JfrArtifactWriterHost<SymbolEntryWriterImpl, TYPE_SYMBOL> SymbolEntryWriter;
-
-typedef const JfrSymbolId::CStringEntry* CStringEntryPtr;
-
-static int write__artifact__cstring__entry__(JfrCheckpointWriter* writer, CStringEntryPtr entry) {
-  assert(writer != NULL, "invariant");
-  assert(entry != NULL, "invariant");
-  writer->write(CREATE_SYMBOL_ID(entry->id()));
-  writer->write(entry->value());
-  return 1;
-}
-
-int write__artifact__cstring__entry(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* e) {
-  assert(e != NULL, "invariant");
-  return write__artifact__cstring__entry__(writer, (CStringEntryPtr)e);
-}
-
-typedef JfrArtifactWriterImplHost<CStringEntryPtr, write__artifact__cstring__entry> CStringEntryWriterImpl;
-typedef JfrArtifactWriterHost<CStringEntryWriterImpl, TYPE_SYMBOL> CStringEntryWriter;
-
-int write__artifact__klass__symbol(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* k) {
-  assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invaiant");
-  assert(k != NULL, "invariant");
-  const InstanceKlass* const ik = (const InstanceKlass*)k;
-  if (ik->is_unsafe_anonymous()) {
-    CStringEntryPtr entry =
-      artifacts->map_cstring(JfrSymbolId::unsafe_anonymous_klass_name_hash_code(ik));
-    assert(entry != NULL, "invariant");
-    return write__artifact__cstring__entry__(writer, entry);
-  }
-
-  SymbolEntryPtr entry = artifacts->map_symbol(JfrSymbolId::regular_klass_name_hash_code(ik));
-  return write__artifact__symbol__entry__(writer, entry);
-}
-
-int _compare_traceid_(const traceid& lhs, const traceid& rhs) {
-  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
+int write__package(JfrCheckpointWriter* writer, const void* p) {
+  assert(p != NULL, "invariant");
+  PkgPtr pkg = (PkgPtr)p;
+  set_serialized(pkg);
+  return write_package(writer, pkg, false);
 }
 
-template <template <typename> class Predicate>
-class KlassSymbolWriterImpl {
- private:
-  JfrCheckpointWriter* _writer;
-  JfrArtifactSet* _artifacts;
-  Predicate<KlassPtr> _predicate;
-  MethodUsedPredicate<true> _method_used_predicate;
-  MethodFlagPredicate _method_flag_predicate;
-  UniquePredicate<traceid, _compare_traceid_> _unique_predicate;
-
-  int klass_symbols(KlassPtr klass);
-  int package_symbols(PkgPtr pkg);
-  int module_symbols(ModPtr module);
-  int class_loader_symbols(CldPtr cld);
-  int method_symbols(KlassPtr klass);
-
- public:
-  typedef KlassPtr Type;
-  KlassSymbolWriterImpl(JfrCheckpointWriter* writer,
-                        JfrArtifactSet* artifacts,
-                        bool class_unload) : _writer(writer),
-                                             _artifacts(artifacts),
-                                             _predicate(class_unload),
-                                             _method_used_predicate(class_unload),
-                                             _method_flag_predicate(class_unload),
-                                             _unique_predicate(class_unload) {}
-
-  int operator()(KlassPtr klass) {
-    assert(klass != NULL, "invariant");
-    int count = 0;
-    if (_predicate(klass)) {
-      count += klass_symbols(klass);
-      PkgPtr pkg = klass->package();
-      if (pkg != NULL) {
-        count += package_symbols(pkg);
-        ModPtr module = pkg->module();
-        if (module != NULL && module->is_named()) {
-          count += module_symbols(module);
-        }
-      }
-      CldPtr cld = klass->class_loader_data();
-      assert(cld != NULL, "invariant");
-      if (!cld->is_unsafe_anonymous()) {
-        count += class_loader_symbols(cld);
-      }
-      if (_method_used_predicate(klass)) {
-        count += method_symbols(klass);
-      }
-    }
-    return count;
-  }
-};
-
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::klass_symbols(KlassPtr klass) {
-  assert(klass != NULL, "invariant");
-  assert(_predicate(klass), "invariant");
-  const InstanceKlass* const ik = (const InstanceKlass*)klass;
-  if (ik->is_unsafe_anonymous()) {
-    CStringEntryPtr entry =
-      this->_artifacts->map_cstring(JfrSymbolId::unsafe_anonymous_klass_name_hash_code(ik));
-    assert(entry != NULL, "invariant");
-    return _unique_predicate(entry->id()) ? write__artifact__cstring__entry__(this->_writer, entry) : 0;
-  }
-  SymbolEntryPtr entry = this->_artifacts->map_symbol(ik->name());
-  assert(entry != NULL, "invariant");
-  return _unique_predicate(entry->id()) ? write__artifact__symbol__entry__(this->_writer, entry) : 0;
-}
-
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::package_symbols(PkgPtr pkg) {
-  assert(pkg != NULL, "invariant");
-  SymbolPtr pkg_name = pkg->name();
-  assert(pkg_name != NULL, "invariant");
-  SymbolEntryPtr package_symbol = this->_artifacts->map_symbol(pkg_name);
-  assert(package_symbol != NULL, "invariant");
-  return _unique_predicate(package_symbol->id()) ?
-    write__artifact__symbol__entry__(this->_writer, package_symbol) : 0;
-}
-
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::module_symbols(ModPtr module) {
-  assert(module != NULL, "invariant");
-  assert(module->is_named(), "invariant");
-  int count = 0;
-  SymbolPtr sym = module->name();
-  SymbolEntryPtr entry = NULL;
-  if (sym != NULL) {
-    entry = this->_artifacts->map_symbol(sym);
-    assert(entry != NULL, "invariant");
-    if (_unique_predicate(entry->id())) {
-      count += write__artifact__symbol__entry__(this->_writer, entry);
-    }
-  }
-  sym = module->version();
-  if (sym != NULL) {
-    entry = this->_artifacts->map_symbol(sym);
-    assert(entry != NULL, "invariant");
-    if (_unique_predicate(entry->id())) {
-      count += write__artifact__symbol__entry__(this->_writer, entry);
-    }
-  }
-  sym = module->location();
-  if (sym != NULL) {
-    entry = this->_artifacts->map_symbol(sym);
-    assert(entry != NULL, "invariant");
-    if (_unique_predicate(entry->id())) {
-      count += write__artifact__symbol__entry__(this->_writer, entry);
-    }
-  }
-  return count;
+int write__package__leakp(JfrCheckpointWriter* writer, const void* p) {
+  assert(p != NULL, "invariant");
+  PkgPtr pkg = (PkgPtr)p;
+  CLEAR_LEAKP(pkg);
+  return write_package(writer, pkg, true);
 }
 
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::class_loader_symbols(CldPtr cld) {
-  assert(cld != NULL, "invariant");
-  assert(!cld->is_unsafe_anonymous(), "invariant");
-  int count = 0;
-  // class loader type
-  const Klass* class_loader_klass = cld->class_loader_klass();
-  if (class_loader_klass == NULL) {
-    // (primordial) boot class loader
-    CStringEntryPtr entry = this->_artifacts->map_cstring(0);
-    assert(entry != NULL, "invariant");
-    assert(strncmp(entry->literal(),
-      BOOTSTRAP_LOADER_NAME,
-      BOOTSTRAP_LOADER_NAME_LEN) == 0, "invariant");
-    if (_unique_predicate(entry->id())) {
-      count += write__artifact__cstring__entry__(this->_writer, entry);
-    }
-  } else {
-    const Symbol* class_loader_name = cld->name();
-    if (class_loader_name != NULL) {
-      SymbolEntryPtr entry = this->_artifacts->map_symbol(class_loader_name);
-      assert(entry != NULL, "invariant");
-      if (_unique_predicate(entry->id())) {
-        count += write__artifact__symbol__entry__(this->_writer, entry);
-      }
-    }
-  }
-  return count;
-}
-
-template <template <typename> class Predicate>
-int KlassSymbolWriterImpl<Predicate>::method_symbols(KlassPtr klass) {
-  assert(_predicate(klass), "invariant");
-  assert(_method_used_predicate(klass), "invariant");
-  assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
-  int count = 0;
-  const InstanceKlass* const ik = InstanceKlass::cast(klass);
-  const int len = ik->methods()->length();
-  for (int i = 0; i < len; ++i) {
-    MethodPtr method = ik->methods()->at(i);
-    if (_method_flag_predicate(method)) {
-      SymbolEntryPtr entry = this->_artifacts->map_symbol(method->name());
-      assert(entry != NULL, "invariant");
-      if (_unique_predicate(entry->id())) {
-        count += write__artifact__symbol__entry__(this->_writer, entry);
-      }
-      entry = this->_artifacts->map_symbol(method->signature());
-      assert(entry != NULL, "invariant");
-      if (_unique_predicate(entry->id())) {
-        count += write__artifact__symbol__entry__(this->_writer, entry);
-      }
-    }
-  }
-  return count;
+static void do_package(PackageEntry* entry) {
+  do_previous_epoch_artifact(_subsystem_callback, entry);
 }
 
-typedef KlassSymbolWriterImpl<LeakPredicate> LeakKlassSymbolWriterImpl;
-typedef JfrArtifactWriterHost<LeakKlassSymbolWriterImpl, TYPE_SYMBOL> LeakKlassSymbolWriter;
-
-class ClearKlassAndMethods {
- private:
-  ClearArtifact<KlassPtr> _clear_klass_tag_bits;
-  ClearArtifact<MethodPtr> _clear_method_flag;
-  MethodUsedPredicate<false> _method_used_predicate;
-
- public:
-  ClearKlassAndMethods(bool class_unload) : _clear_klass_tag_bits(class_unload),
-                                            _clear_method_flag(class_unload),
-                                            _method_used_predicate(class_unload) {}
-  bool operator()(KlassPtr klass) {
-    if (_method_used_predicate(klass)) {
-      const InstanceKlass* ik = InstanceKlass::cast(klass);
-      const int len = ik->methods()->length();
-      for (int i = 0; i < len; ++i) {
-        MethodPtr method = ik->methods()->at(i);
-        _clear_method_flag(method);
-      }
-    }
-    _clear_klass_tag_bits(klass);
-    return true;
-  }
-};
-
-typedef CompositeFunctor<KlassPtr,
-                         TagLeakpKlassArtifact,
-                         LeakKlassWriter> LeakpKlassArtifactTagging;
-
-typedef CompositeFunctor<KlassPtr,
-                         LeakpKlassArtifactTagging,
-                         KlassWriter> CompositeKlassWriter;
-
-typedef CompositeFunctor<KlassPtr,
-                         CompositeKlassWriter,
-                         KlassArtifactRegistrator> CompositeKlassWriterRegistration;
-
-typedef CompositeFunctor<KlassPtr,
-                         KlassWriter,
-                         KlassArtifactRegistrator> KlassWriterRegistration;
-
-typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
-typedef JfrArtifactCallbackHost<KlassPtr, CompositeKlassWriterRegistration> CompositeKlassCallback;
-
-/*
- * Composite operation
- *
- * TagLeakpKlassArtifact ->
- *   LeakpPredicate ->
- *     LeakpKlassWriter ->
- *       KlassPredicate ->
- *         KlassWriter ->
- *           KlassWriterRegistration
- */
-void JfrTypeSet::write_klass_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(!_artifacts->has_klass_entries(), "invariant");
-  KlassArtifactRegistrator reg(_artifacts);
-  KlassWriter kw(writer, _artifacts, _class_unload);
-  KlassWriterRegistration kwr(&kw, &reg);
-  if (leakp_writer == NULL) {
-    KlassCallback callback(&kwr);
-    _subsystem_callback = &callback;
-    do_klasses();
-    return;
-  }
-  TagLeakpKlassArtifact tagging(_class_unload);
-  LeakKlassWriter lkw(leakp_writer, _artifacts, _class_unload);
-  LeakpKlassArtifactTagging lpkat(&tagging, &lkw);
-  CompositeKlassWriter ckw(&lpkat, &kw);
-  CompositeKlassWriterRegistration ckwr(&ckw, &reg);
-  CompositeKlassCallback callback(&ckwr);
-  _subsystem_callback = &callback;
-  do_klasses();
+static void do_packages() {
+  ClassLoaderDataGraph::packages_do(&do_package);
 }
 
-typedef CompositeFunctor<PkgPtr,
-                         PackageWriter,
-                         ClearArtifact<PkgPtr> > PackageWriterWithClear;
-
-typedef CompositeFunctor<PkgPtr,
-                         LeakPackageWriter,
-                         PackageWriter> CompositePackageWriter;
-
-typedef CompositeFunctor<PkgPtr,
-                         CompositePackageWriter,
-                         ClearArtifact<PkgPtr> > CompositePackageWriterWithClear;
-
 class PackageFieldSelector {
  public:
   typedef PkgPtr TypePtr;
@@ -583,60 +349,86 @@
   }
 };
 
-typedef KlassToFieldEnvelope<PackageFieldSelector,
-                             PackageWriterWithClear> KlassPackageWriterWithClear;
+typedef SerializePredicate<PkgPtr> PackagePredicate;
+typedef JfrPredicatedTypeWriterImplHost<PkgPtr, PackagePredicate, write__package> PackageWriterImpl;
+typedef JfrTypeWriterHost<PackageWriterImpl, TYPE_PACKAGE> PackageWriter;
+typedef CompositeFunctor<PkgPtr, PackageWriter, ClearArtifact<PkgPtr> > PackageWriterWithClear;
+typedef KlassToFieldEnvelope<PackageFieldSelector, PackageWriter> KlassPackageWriter;
+typedef JfrArtifactCallbackHost<PkgPtr, PackageWriterWithClear> PackageCallback;
 
-typedef KlassToFieldEnvelope<PackageFieldSelector,
-                             CompositePackageWriterWithClear> KlassCompositePackageWriterWithClear;
+typedef LeakPredicate<PkgPtr> LeakPackagePredicate;
+typedef JfrPredicatedTypeWriterImplHost<PkgPtr, LeakPackagePredicate, write__package__leakp> LeakPackageWriterImpl;
+typedef JfrTypeWriterHost<LeakPackageWriterImpl, TYPE_PACKAGE> LeakPackageWriter;
 
-typedef JfrArtifactCallbackHost<PkgPtr, PackageWriterWithClear> PackageCallback;
+typedef CompositeFunctor<PkgPtr, LeakPackageWriter, PackageWriter> CompositePackageWriter;
+typedef KlassToFieldEnvelope<PackageFieldSelector, CompositePackageWriter> KlassCompositePackageWriter;
+typedef KlassToFieldEnvelope<PackageFieldSelector, PackageWriterWithClear> KlassPackageWriterWithClear;
+typedef CompositeFunctor<PkgPtr, CompositePackageWriter, ClearArtifact<PkgPtr> > CompositePackageWriterWithClear;
 typedef JfrArtifactCallbackHost<PkgPtr, CompositePackageWriterWithClear> CompositePackageCallback;
 
-/*
- * Composite operation
- *
- * LeakpPackageWriter ->
- *   PackageWriter ->
- *     ClearArtifact<PackageEntry>
- *
- */
-void JfrTypeSet::write_package_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(_artifacts->has_klass_entries(), "invariant");
-  ClearArtifact<PkgPtr> clear(_class_unload);
-  PackageWriter pw(writer, _artifacts, _class_unload);
-  if (leakp_writer == NULL) {
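+// Package constants: in the current epoch only packages reachable from the
+// tagged klass list are written. When the previous epoch is flushed, the
+// ClassLoaderDataGraph is also walked so remaining tagged entries are written
+// (and mirrored to the leak profiler checkpoint if a leakp writer is attached)
+// before their tag bits are cleared.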
+static void write_packages() {
+  assert(_writer != NULL, "invariant");
+  PackageWriter pw(_writer, _class_unload);
+  KlassPackageWriter kpw(&pw);
+  if (current_epoch()) {
+    _artifacts->iterate_klasses(kpw);
+    _artifacts->tally(pw);
+    return;
+  }
+  assert(previous_epoch(), "invariant");
+  if (_leakp_writer == NULL) {
+    _artifacts->iterate_klasses(kpw);
+    ClearArtifact<PkgPtr> clear;
     PackageWriterWithClear pwwc(&pw, &clear);
-    KlassPackageWriterWithClear kpwwc(&pwwc);
-    _artifacts->iterate_klasses(kpwwc);
     PackageCallback callback(&pwwc);
     _subsystem_callback = &callback;
     do_packages();
-    return;
+  } else {
+    LeakPackageWriter lpw(_leakp_writer, _class_unload);
+    CompositePackageWriter cpw(&lpw, &pw);
+    KlassCompositePackageWriter kcpw(&cpw);
+    _artifacts->iterate_klasses(kcpw);
+    ClearArtifact<PkgPtr> clear;
+    CompositePackageWriterWithClear cpwwc(&cpw, &clear);
+    CompositePackageCallback callback(&cpwwc);
+    _subsystem_callback = &callback;
+    do_packages();
   }
-  LeakPackageWriter lpw(leakp_writer, _artifacts, _class_unload);
-  CompositePackageWriter cpw(&lpw, &pw);
-  CompositePackageWriterWithClear cpwwc(&cpw, &clear);
-  KlassCompositePackageWriterWithClear ckpw(&cpwwc);
-  _artifacts->iterate_klasses(ckpw);
-  CompositePackageCallback callback(&cpwwc);
-  _subsystem_callback = &callback;
-  do_packages();
+  _artifacts->tally(pw);
 }
 
-typedef CompositeFunctor<ModPtr,
-                         ModuleWriter,
-                         ClearArtifact<ModPtr> > ModuleWriterWithClear;
+static int write_module(JfrCheckpointWriter* writer, ModPtr mod, bool leakp) {
+  assert(mod != NULL, "invariant");
+  assert(_artifacts != NULL, "invariant");
+  writer->write(artifact_id(mod));
+  writer->write(mark_symbol(mod->name(), leakp));
+  writer->write(mark_symbol(mod->version(), leakp));
+  writer->write(mark_symbol(mod->location(), leakp));
+  writer->write(cld_id(mod->loader_data(), leakp));
+  return 1;
+}
 
-typedef CompositeFunctor<ModPtr,
-                         LeakModuleWriter,
-                         ModuleWriter> CompositeModuleWriter;
+int write__module(JfrCheckpointWriter* writer, const void* m) {
+  assert(m != NULL, "invariant");
+  ModPtr mod = (ModPtr)m;
+  set_serialized(mod);
+  return write_module(writer, mod, false);
+}
 
-typedef CompositeFunctor<ModPtr,
-                         CompositeModuleWriter,
-                         ClearArtifact<ModPtr> > CompositeModuleWriterWithClear;
+int write__module__leakp(JfrCheckpointWriter* writer, const void* m) {
+  assert(m != NULL, "invariant");
+  ModPtr mod = (ModPtr)m;
+  CLEAR_LEAKP(mod);
+  return write_module(writer, mod, true);
+}
 
-typedef JfrArtifactCallbackHost<ModPtr, ModuleWriterWithClear> ModuleCallback;
-typedef JfrArtifactCallbackHost<ModPtr, CompositeModuleWriterWithClear> CompositeModuleCallback;
+static void do_module(ModuleEntry* entry) {
+  do_previous_epoch_artifact(_subsystem_callback, entry);
+}
+
+static void do_modules() {
+  ClassLoaderDataGraph::modules_do(&do_module);
+}
 
 class ModuleFieldSelector {
  public:
@@ -648,47 +440,88 @@
   }
 };
 
-typedef KlassToFieldEnvelope<ModuleFieldSelector,
-                             ModuleWriterWithClear> KlassModuleWriterWithClear;
+typedef SerializePredicate<ModPtr> ModulePredicate;
+typedef JfrPredicatedTypeWriterImplHost<ModPtr, ModulePredicate, write__module> ModuleWriterImpl;
+typedef JfrTypeWriterHost<ModuleWriterImpl, TYPE_MODULE> ModuleWriter;
+typedef CompositeFunctor<ModPtr, ModuleWriter, ClearArtifact<ModPtr> > ModuleWriterWithClear;
+typedef JfrArtifactCallbackHost<ModPtr, ModuleWriterWithClear> ModuleCallback;
+typedef KlassToFieldEnvelope<ModuleFieldSelector, ModuleWriter> KlassModuleWriter;
 
-typedef KlassToFieldEnvelope<ModuleFieldSelector,
-                             CompositeModuleWriterWithClear> KlassCompositeModuleWriterWithClear;
+typedef LeakPredicate<ModPtr> LeakModulePredicate;
+typedef JfrPredicatedTypeWriterImplHost<ModPtr, LeakModulePredicate, write__module__leakp> LeakModuleWriterImpl;
+typedef JfrTypeWriterHost<LeakModuleWriterImpl, TYPE_MODULE> LeakModuleWriter;
 
-/*
- * Composite operation
- *
- * LeakpModuleWriter ->
- *   ModuleWriter ->
- *     ClearArtifact<ModuleEntry>
- */
-void JfrTypeSet::write_module_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(_artifacts->has_klass_entries(), "invariant");
-  ClearArtifact<ModPtr> clear(_class_unload);
-  ModuleWriter mw(writer, _artifacts, _class_unload);
-  if (leakp_writer == NULL) {
+typedef CompositeFunctor<ModPtr, LeakModuleWriter, ModuleWriter> CompositeModuleWriter;
+typedef KlassToFieldEnvelope<ModuleFieldSelector, CompositeModuleWriter> KlassCompositeModuleWriter;
+typedef CompositeFunctor<ModPtr, CompositeModuleWriter, ClearArtifact<ModPtr> > CompositeModuleWriterWithClear;
+typedef JfrArtifactCallbackHost<ModPtr, CompositeModuleWriterWithClear> CompositeModuleCallback;
+
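+// Module constants follow the same epoch/leakp scheme as write_packages().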
+static void write_modules() {
+  assert(_writer != NULL, "invariant");
+  ModuleWriter mw(_writer, _class_unload);
+  KlassModuleWriter kmw(&mw);
+  if (current_epoch()) {
+    _artifacts->iterate_klasses(kmw);
+    _artifacts->tally(mw);
+    return;
+  }
+  assert(previous_epoch(), "invariant");
+  if (_leakp_writer == NULL) {
+    _artifacts->iterate_klasses(kmw);
+    ClearArtifact<ModPtr> clear;
     ModuleWriterWithClear mwwc(&mw, &clear);
-    KlassModuleWriterWithClear kmwwc(&mwwc);
-    _artifacts->iterate_klasses(kmwwc);
     ModuleCallback callback(&mwwc);
     _subsystem_callback = &callback;
     do_modules();
-    return;
+  } else {
+    LeakModuleWriter lmw(_leakp_writer, _class_unload);
+    CompositeModuleWriter cmw(&lmw, &mw);
+    KlassCompositeModuleWriter kcpw(&cmw);
+    _artifacts->iterate_klasses(kcpw);
+    ClearArtifact<ModPtr> clear;
+    CompositeModuleWriterWithClear cmwwc(&cmw, &clear);
+    CompositeModuleCallback callback(&cmwwc);
+    _subsystem_callback = &callback;
+    do_modules();
   }
-  LeakModuleWriter lmw(leakp_writer, _artifacts, _class_unload);
-  CompositeModuleWriter cmw(&lmw, &mw);
-  CompositeModuleWriterWithClear cmwwc(&cmw, &clear);
-  KlassCompositeModuleWriterWithClear kmwwc(&cmwwc);
-  _artifacts->iterate_klasses(kmwwc);
-  CompositeModuleCallback callback(&cmwwc);
-  _subsystem_callback = &callback;
-  do_modules();
+  _artifacts->tally(mw);
 }
 
-typedef CompositeFunctor<CldPtr, CldWriter, ClearArtifact<CldPtr> > CldWriterWithClear;
-typedef CompositeFunctor<CldPtr, LeakCldWriter, CldWriter> CompositeCldWriter;
-typedef CompositeFunctor<CldPtr, CompositeCldWriter, ClearArtifact<CldPtr> > CompositeCldWriterWithClear;
-typedef JfrArtifactCallbackHost<CldPtr, CldWriterWithClear> CldCallback;
-typedef JfrArtifactCallbackHost<CldPtr, CompositeCldWriterWithClear> CompositeCldCallback;
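+// A class loader constant is written as: instance id, loader type (klass) id
+// and instance name; the bootstrap loader has no loader klass and gets a
+// type id of 0 together with the synthetic "bootstrap" name.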
+static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp) {
+  assert(cld != NULL, "invariant");
+  assert(!cld->is_unsafe_anonymous(), "invariant");
+  // class loader type
+  const Klass* class_loader_klass = cld->class_loader_klass();
+  if (class_loader_klass == NULL) {
+    // (primordial) boot class loader
+    writer->write(artifact_id(cld)); // class loader instance id
+    writer->write((traceid)0);  // class loader type id (absence of)
+    writer->write(get_bootstrap_name(leakp)); // maps to synthetic name -> "bootstrap"
+  } else {
+    writer->write(artifact_id(cld)); // class loader instance id
+    writer->write(artifact_id(class_loader_klass)); // class loader type id
+    writer->write(mark_symbol(cld->name(), leakp)); // class loader instance name
+  }
+  return 1;
+}
+
+int write__classloader(JfrCheckpointWriter* writer, const void* c) {
+  assert(c != NULL, "invariant");
+  CldPtr cld = (CldPtr)c;
+  set_serialized(cld);
+  return write_classloader(writer, cld, false);
+}
+
+int write__classloader__leakp(JfrCheckpointWriter* writer, const void* c) {
+  assert(c != NULL, "invariant");
+  CldPtr cld = (CldPtr)c;
+  CLEAR_LEAKP(cld);
+  return write_classloader(writer, cld, true);
+}
+
+static void do_class_loader_data(ClassLoaderData* cld) {
+  do_previous_epoch_artifact(_subsystem_callback, cld);
+}
 
 class CldFieldSelector {
  public:
@@ -700,289 +533,328 @@
   }
 };
 
-typedef KlassToFieldEnvelope<CldFieldSelector, CldWriterWithClear> KlassCldWriterWithClear;
-typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriterWithClear> KlassCompositeCldWriterWithClear;
+class CLDCallback : public CLDClosure {
+ public:
+  CLDCallback() {}
+  void do_cld(ClassLoaderData* cld) {
+    assert(cld != NULL, "invariant");
+    if (cld->is_unsafe_anonymous()) {
+      return;
+    }
+    do_class_loader_data(cld);
+  }
+};
+
+static void do_class_loaders() {
+  CLDCallback cld_cb;
+  ClassLoaderDataGraph::loaded_cld_do(&cld_cb);
+}
+
+typedef SerializePredicate<CldPtr> CldPredicate;
+typedef JfrPredicatedTypeWriterImplHost<CldPtr, CldPredicate, write__classloader> CldWriterImpl;
+typedef JfrTypeWriterHost<CldWriterImpl, TYPE_CLASSLOADER> CldWriter;
+typedef CompositeFunctor<CldPtr, CldWriter, ClearArtifact<CldPtr> > CldWriterWithClear;
+typedef JfrArtifactCallbackHost<CldPtr, CldWriterWithClear> CldCallback;
+typedef KlassToFieldEnvelope<CldFieldSelector, CldWriter> KlassCldWriter;
 
-/*
- * Composite operation
- *
- * LeakpClassLoaderWriter ->
- *   ClassLoaderWriter ->
- *     ClearArtifact<ClassLoaderData>
- */
-void JfrTypeSet::write_class_loader_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(_artifacts->has_klass_entries(), "invariant");
-  ClearArtifact<CldPtr> clear(_class_unload);
-  CldWriter cldw(writer, _artifacts, _class_unload);
-  if (leakp_writer == NULL) {
+typedef LeakPredicate<CldPtr> LeakCldPredicate;
+typedef JfrPredicatedTypeWriterImplHost<CldPtr, LeakCldPredicate, write__classloader__leakp> LeakCldWriterImpl;
+typedef JfrTypeWriterHost<LeakCldWriterImpl, TYPE_CLASSLOADER> LeakCldWriter;
+
+typedef CompositeFunctor<CldPtr, LeakCldWriter, CldWriter> CompositeCldWriter;
+typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriter> KlassCompositeCldWriter;
+typedef CompositeFunctor<CldPtr, CompositeCldWriter, ClearArtifact<CldPtr> > CompositeCldWriterWithClear;
+typedef JfrArtifactCallbackHost<CldPtr, CompositeCldWriterWithClear> CompositeCldCallback;
+
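+// Class loader constants follow the same epoch/leakp scheme as write_packages();
+// unsafe anonymous CLDs are excluded by CLDCallback.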
+static void write_classloaders() {
+  assert(_writer != NULL, "invariant");
+  CldWriter cldw(_writer, _class_unload);
+  KlassCldWriter kcw(&cldw);
+  if (current_epoch()) {
+    _artifacts->iterate_klasses(kcw);
+    _artifacts->tally(cldw);
+    return;
+  }
+  assert(previous_epoch(), "invariant");
+  if (_leakp_writer == NULL) {
+    _artifacts->iterate_klasses(kcw);
+    ClearArtifact<CldPtr> clear;
     CldWriterWithClear cldwwc(&cldw, &clear);
-    KlassCldWriterWithClear kcldwwc(&cldwwc);
-    _artifacts->iterate_klasses(kcldwwc);
     CldCallback callback(&cldwwc);
     _subsystem_callback = &callback;
     do_class_loaders();
-    return;
+  } else {
+    LeakCldWriter lcldw(_leakp_writer, _class_unload);
+    CompositeCldWriter ccldw(&lcldw, &cldw);
+    KlassCompositeCldWriter kccldw(&ccldw);
+    _artifacts->iterate_klasses(kccldw);
+    ClearArtifact<CldPtr> clear;
+    CompositeCldWriterWithClear ccldwwc(&ccldw, &clear);
+    CompositeCldCallback callback(&ccldwwc);
+    _subsystem_callback = &callback;
+    do_class_loaders();
   }
-  LeakCldWriter lcldw(leakp_writer, _artifacts, _class_unload);
-  CompositeCldWriter ccldw(&lcldw, &cldw);
-  CompositeCldWriterWithClear ccldwwc(&ccldw, &clear);
-  KlassCompositeCldWriterWithClear kcclwwc(&ccldwwc);
-  _artifacts->iterate_klasses(kcclwwc);
-  CompositeCldCallback callback(&ccldwwc);
-  _subsystem_callback = &callback;
-  do_class_loaders();
+  _artifacts->tally(cldw);
+}
+
+static u1 get_visibility(MethodPtr method) {
+  assert(method != NULL, "invariant");
+  return const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0;
+}
+
+template <>
+void set_serialized<Method>(MethodPtr method) {
+  assert(method != NULL, "invariant");
+  SET_METHOD_SERIALIZED(method);
+  assert(IS_METHOD_SERIALIZED(method), "invariant");
 }
 
-template <bool predicate_bool, typename MethodFunctor>
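+// A method constant is written as: method id, holder klass id, name, signature,
+// flags and a visibility byte (1 if the method is hidden).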
+static int write_method(JfrCheckpointWriter* writer, MethodPtr method, bool leakp) {
+  assert(writer != NULL, "invariant");
+  assert(method != NULL, "invariant");
+  assert(_artifacts != NULL, "invariant");
+  KlassPtr klass = method->method_holder();
+  assert(klass != NULL, "invariant");
+  writer->write(method_id(klass, method));
+  writer->write(artifact_id(klass));
+  writer->write(mark_symbol(method->name(), leakp));
+  writer->write(mark_symbol(method->signature(), leakp));
+  writer->write((u2)get_flags(method));
+  writer->write(get_visibility(method));
+  return 1;
+}
+
+int write__method(JfrCheckpointWriter* writer, const void* m) {
+  assert(m != NULL, "invariant");
+  MethodPtr method = (MethodPtr)m;
+  set_serialized(method);
+  return write_method(writer, method, false);
+}
+
+int write__method__leakp(JfrCheckpointWriter* writer, const void* m) {
+  assert(m != NULL, "invariant");
+  MethodPtr method = (MethodPtr)m;
+  return write_method(writer, method, true);
+}
+
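+// Iterates the methods of each klass accepted by MethodUsedPredicate, applies
+// MethodCallback to every method accepted by MethodFlagPredicate and finally
+// forwards the klass itself to KlassCallback.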
+template <typename MethodCallback, typename KlassCallback, bool leakp>
 class MethodIteratorHost {
  private:
-  MethodFunctor _method_functor;
-  MethodUsedPredicate<predicate_bool> _method_used_predicate;
-  MethodFlagPredicate _method_flag_predicate;
-
+  MethodCallback _method_cb;
+  KlassCallback _klass_cb;
+  MethodUsedPredicate<leakp> _method_used_predicate;
+  MethodFlagPredicate<leakp> _method_flag_predicate;
  public:
   MethodIteratorHost(JfrCheckpointWriter* writer,
-                     JfrArtifactSet* artifacts,
-                     bool class_unload,
+                     bool current_epoch = false,
+                     bool class_unload = false,
                      bool skip_header = false) :
-    _method_functor(writer, artifacts, class_unload, skip_header),
-    _method_used_predicate(class_unload),
-    _method_flag_predicate(class_unload) {}
+    _method_cb(writer, class_unload, skip_header),
+    _klass_cb(writer, class_unload, skip_header),
+    _method_used_predicate(current_epoch),
+    _method_flag_predicate(current_epoch) {}
 
   bool operator()(KlassPtr klass) {
     if (_method_used_predicate(klass)) {
-      assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
-      const InstanceKlass* ik = InstanceKlass::cast(klass);
+      const InstanceKlass* const ik = InstanceKlass::cast(klass);
       const int len = ik->methods()->length();
       for (int i = 0; i < len; ++i) {
         MethodPtr method = ik->methods()->at(i);
         if (_method_flag_predicate(method)) {
-          _method_functor(method);
+          _method_cb(method);
         }
       }
     }
-    return true;
+    return _klass_cb(klass);
   }
 
-  int count() const { return _method_functor.count(); }
-  void add(int count) { _method_functor.add(count); }
+  int count() const { return _method_cb.count(); }
+  void add(int count) { _method_cb.add(count); }
 };
 
-typedef MethodIteratorHost<true /*leakp */,  MethodWriterImpl> LeakMethodWriter;
-typedef MethodIteratorHost<false, MethodWriterImpl> MethodWriter;
-typedef CompositeFunctor<KlassPtr, LeakMethodWriter, MethodWriter> CompositeMethodWriter;
-
-/*
- * Composite operation
- *
- * LeakpMethodWriter ->
- *   MethodWriter
- */
-void JfrTypeSet::write_method_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(_artifacts->has_klass_entries(), "invariant");
-  MethodWriter mw(writer, _artifacts, _class_unload);
-  if (leakp_writer == NULL) {
-    _artifacts->iterate_klasses(mw);
-    return;
-  }
-  LeakMethodWriter lpmw(leakp_writer, _artifacts, _class_unload);
-  CompositeMethodWriter cmw(&lpmw, &mw);
-  _artifacts->iterate_klasses(cmw);
-}
-static void write_symbols_leakp(JfrCheckpointWriter* leakp_writer, JfrArtifactSet* artifacts, bool class_unload) {
-  assert(leakp_writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  LeakKlassSymbolWriter lpksw(leakp_writer, artifacts, class_unload);
-  artifacts->iterate_klasses(lpksw);
-}
-static void write_symbols(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, JfrArtifactSet* artifacts, bool class_unload) {
-  assert(writer != NULL, "invariant");
-  assert(artifacts != NULL, "invariant");
-  if (leakp_writer != NULL) {
-    write_symbols_leakp(leakp_writer, artifacts, class_unload);
-  }
-  // iterate all registered symbols
-  SymbolEntryWriter symbol_writer(writer, artifacts, class_unload);
-  artifacts->iterate_symbols(symbol_writer);
-  CStringEntryWriter cstring_writer(writer, artifacts, class_unload, true); // skip header
-  artifacts->iterate_cstrings(cstring_writer);
-  symbol_writer.add(cstring_writer.count());
-}
-
-bool JfrTypeSet::_class_unload = false;
-JfrArtifactSet* JfrTypeSet::_artifacts = NULL;
-JfrArtifactClosure* JfrTypeSet::_subsystem_callback = NULL;
-
-void JfrTypeSet::write_symbol_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer) {
-  assert(writer != NULL, "invariant");
-  assert(_artifacts->has_klass_entries(), "invariant");
-  write_symbols(writer, leakp_writer, _artifacts, _class_unload);
-}
-
-void JfrTypeSet::do_unloaded_klass(Klass* klass) {
-  assert(klass != NULL, "invariant");
-  assert(_subsystem_callback != NULL, "invariant");
-  if (IS_JDK_JFR_EVENT_SUBKLASS(klass)) {
-    JfrEventClasses::increment_unloaded_event_class();
-  }
-  if (USED_THIS_EPOCH(klass)) { // includes leakp subset
-    _subsystem_callback->do_artifact(klass);
-    return;
-  }
-  if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
-    SET_LEAKP_USED_THIS_EPOCH(klass); // tag leakp "safe byte" for subset inclusion
-    _subsystem_callback->do_artifact(klass);
-  }
-}
-
-void JfrTypeSet::do_klass(Klass* klass) {
-  assert(klass != NULL, "invariant");
-  assert(_subsystem_callback != NULL, "invariant");
-  if (USED_PREV_EPOCH(klass)) { // includes leakp subset
-    _subsystem_callback->do_artifact(klass);
-    return;
-  }
-  if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
-    SET_LEAKP_USED_PREV_EPOCH(klass); // tag leakp "safe byte" for subset inclusion
-    _subsystem_callback->do_artifact(klass);
-  }
-}
-
-void JfrTypeSet::do_klasses() {
-  if (_class_unload) {
-    ClassLoaderDataGraph::classes_unloading_do(&do_unloaded_klass);
-    return;
-  }
-  ClassLoaderDataGraph::classes_do(&do_klass);
-}
-
-void JfrTypeSet::do_unloaded_package(PackageEntry* entry) {
-  assert(entry != NULL, "invariant");
-  assert(_subsystem_callback != NULL, "invariant");
-  if (ANY_USED_THIS_EPOCH(entry)) { // includes leakp subset
-    _subsystem_callback->do_artifact(entry);
-  }
-}
-
-void JfrTypeSet::do_package(PackageEntry* entry) {
-  assert(_subsystem_callback != NULL, "invariant");
-  if (ANY_USED_PREV_EPOCH(entry)) { // includes leakp subset
-    _subsystem_callback->do_artifact(entry);
-  }
-}
-
-void JfrTypeSet::do_packages() {
-  if (_class_unload) {
-    ClassLoaderDataGraph::packages_unloading_do(&do_unloaded_package);
-    return;
-  }
-  ClassLoaderDataGraph::packages_do(&do_package);
-}
-void JfrTypeSet::do_unloaded_module(ModuleEntry* entry) {
-  assert(entry != NULL, "invariant");
-  assert(_subsystem_callback != NULL, "invariant");
-  if (ANY_USED_THIS_EPOCH(entry)) { // includes leakp subset
-    _subsystem_callback->do_artifact(entry);
-  }
-}
-
-void JfrTypeSet::do_module(ModuleEntry* entry) {
-  assert(_subsystem_callback != NULL, "invariant");
-  if (ANY_USED_PREV_EPOCH(entry)) { // includes leakp subset
-    _subsystem_callback->do_artifact(entry);
-  }
-}
-
-void JfrTypeSet::do_modules() {
-  if (_class_unload) {
-    ClassLoaderDataGraph::modules_unloading_do(&do_unloaded_module);
-    return;
-  }
-  ClassLoaderDataGraph::modules_do(&do_module);
-}
-
-void JfrTypeSet::do_unloaded_class_loader_data(ClassLoaderData* cld) {
-  assert(_subsystem_callback != NULL, "invariant");
-  if (ANY_USED_THIS_EPOCH(cld)) { // includes leakp subset
-    _subsystem_callback->do_artifact(cld);
-  }
-}
-
-void JfrTypeSet::do_class_loader_data(ClassLoaderData* cld) {
-  assert(_subsystem_callback != NULL, "invariant");
-  if (ANY_USED_PREV_EPOCH(cld)) { // includes leakp subset
-    _subsystem_callback->do_artifact(cld);
-  }
-}
-
-class CLDCallback : public CLDClosure {
- private:
-  bool _class_unload;
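+// Adapts a default-constructible functor (e.g. Stub or ClearArtifact) to the
+// writer-style constructor signature expected by MethodIteratorHost.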
+template <typename T, template <typename> class Impl>
+class Wrapper {
+  Impl<T> _t;
  public:
-  CLDCallback(bool class_unload) : _class_unload(class_unload) {}
-  void do_cld(ClassLoaderData* cld) {
-     assert(cld != NULL, "invariant");
-    if (cld->is_unsafe_anonymous()) {
-      return;
-    }
-    if (_class_unload) {
-      JfrTypeSet::do_unloaded_class_loader_data(cld);
-      return;
-    }
-    JfrTypeSet::do_class_loader_data(cld);
+  Wrapper(JfrCheckpointWriter*, bool, bool) : _t() {}
+  bool operator()(T const& value) {
+    return _t(value);
   }
 };
 
-void JfrTypeSet::do_class_loaders() {
-  CLDCallback cld_cb(_class_unload);
-  if (_class_unload) {
-    ClassLoaderDataGraph::cld_unloading_do(&cld_cb);
-    return;
+typedef SerializePredicate<MethodPtr> MethodPredicate;
+typedef JfrPredicatedTypeWriterImplHost<MethodPtr, MethodPredicate, write__method> MethodWriterImplTarget;
+typedef Wrapper<KlassPtr, Stub> KlassCallbackStub;
+typedef JfrTypeWriterHost<MethodWriterImplTarget, TYPE_METHOD> MethodWriterImpl;
+typedef MethodIteratorHost<MethodWriterImpl, KlassCallbackStub, false> MethodWriter;
+
+typedef LeakPredicate<MethodPtr> LeakMethodPredicate;
+typedef JfrPredicatedTypeWriterImplHost<MethodPtr, LeakMethodPredicate, write__method__leakp> LeakMethodWriterImplTarget;
+typedef JfrTypeWriterHost<LeakMethodWriterImplTarget, TYPE_METHOD> LeakMethodWriterImpl;
+typedef MethodIteratorHost<LeakMethodWriterImpl, KlassCallbackStub, true> LeakMethodWriter;
+typedef CompositeFunctor<KlassPtr, LeakMethodWriter, MethodWriter> CompositeMethodWriter;
+
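+// Methods are written from the tagged klass list only; with a leakp writer
+// present, leakp-tagged methods are first emitted to the leak profiler
+// checkpoint and the regular set is then written to _writer.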
+static void write_methods() {
+  assert(_writer != NULL, "invariant");
+  MethodWriter mw(_writer, current_epoch(), _class_unload);
+  if (_leakp_writer == NULL) {
+    _artifacts->iterate_klasses(mw);
+  } else {
+    LeakMethodWriter lpmw(_leakp_writer, current_epoch(), _class_unload);
+    CompositeMethodWriter cmw(&lpmw, &mw);
+    _artifacts->iterate_klasses(cmw);
   }
-  ClassLoaderDataGraph::loaded_cld_do(&cld_cb);
+  _artifacts->tally(mw);
+}
+
+template <>
+void set_serialized<JfrSymbolId::SymbolEntry>(SymbolEntryPtr ptr) {
+  assert(ptr != NULL, "invariant");
+  ptr->set_serialized();
+  assert(ptr->is_serialized(), "invariant");
+}
+
+template <>
+void set_serialized<JfrSymbolId::CStringEntry>(CStringEntryPtr ptr) {
+  assert(ptr != NULL, "invariant");
+  ptr->set_serialized();
+  assert(ptr->is_serialized(), "invariant");
+}
+
+static int write_symbol(JfrCheckpointWriter* writer, SymbolEntryPtr entry, bool leakp) {
+  assert(writer != NULL, "invariant");
+  assert(entry != NULL, "invariant");
+  ResourceMark rm;
+  writer->write(create_symbol_id(entry->id()));
+  writer->write(entry->value()->as_C_string());
+  return 1;
+}
+
+int write__symbol(JfrCheckpointWriter* writer, const void* e) {
+  assert(e != NULL, "invariant");
+  SymbolEntryPtr entry = (SymbolEntryPtr)e;
+  set_serialized(entry);
+  return write_symbol(writer, entry, false);
+}
+
+int write__symbol__leakp(JfrCheckpointWriter* writer, const void* e) {
+  assert(e != NULL, "invariant");
+  SymbolEntryPtr entry = (SymbolEntryPtr)e;
+  return write_symbol(writer, entry, true);
+}
+
+static int write_cstring(JfrCheckpointWriter* writer, CStringEntryPtr entry, bool leakp) {
+  assert(writer != NULL, "invariant");
+  assert(entry != NULL, "invariant");
+  writer->write(create_symbol_id(entry->id()));
+  writer->write(entry->value());
+  return 1;
+}
+
+int write__cstring(JfrCheckpointWriter* writer, const void* e) {
+  assert(e != NULL, "invariant");
+  CStringEntryPtr entry = (CStringEntryPtr)e;
+  set_serialized(entry);
+  return write_cstring(writer, entry, false);
 }
 
-static void clear_artifacts(JfrArtifactSet* artifacts,
-                            bool class_unload) {
-  assert(artifacts != NULL, "invariant");
-  assert(artifacts->has_klass_entries(), "invariant");
+int write__cstring__leakp(JfrCheckpointWriter* writer, const void* e) {
+  assert(e != NULL, "invariant");
+  CStringEntryPtr entry = (CStringEntryPtr)e;
+  return write_cstring(writer, entry, true);
+}
+
+typedef SymbolPredicate<SymbolEntryPtr, false> SymPredicate;
+typedef JfrPredicatedTypeWriterImplHost<SymbolEntryPtr, SymPredicate, write__symbol> SymbolEntryWriterImpl;
+typedef JfrTypeWriterHost<SymbolEntryWriterImpl, TYPE_SYMBOL> SymbolEntryWriter;
+typedef SymbolPredicate<CStringEntryPtr, false> CStringPredicate;
+typedef JfrPredicatedTypeWriterImplHost<CStringEntryPtr, CStringPredicate, write__cstring> CStringEntryWriterImpl;
+typedef JfrTypeWriterHost<CStringEntryWriterImpl, TYPE_SYMBOL> CStringEntryWriter;
+
+typedef SymbolPredicate<SymbolEntryPtr, true> LeakSymPredicate;
+typedef JfrPredicatedTypeWriterImplHost<SymbolEntryPtr, LeakSymPredicate, write__symbol__leakp> LeakSymbolEntryWriterImpl;
+typedef JfrTypeWriterHost<LeakSymbolEntryWriterImpl, TYPE_SYMBOL> LeakSymbolEntryWriter;
+typedef CompositeFunctor<SymbolEntryPtr, LeakSymbolEntryWriter, SymbolEntryWriter> CompositeSymbolWriter;
+typedef SymbolPredicate<CStringEntryPtr, true> LeakCStringPredicate;
+typedef JfrPredicatedTypeWriterImplHost<CStringEntryPtr, LeakCStringPredicate, write__cstring__leakp> LeakCStringEntryWriterImpl;
+typedef JfrTypeWriterHost<LeakCStringEntryWriterImpl, TYPE_SYMBOL> LeakCStringEntryWriter;
+typedef CompositeFunctor<CStringEntryPtr, LeakCStringEntryWriter, CStringEntryWriter> CompositeCStringWriter;
+
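+// Symbol and c-string entries are written to both checkpoints; the c-string
+// pass skips the constant pool header and its count is folded into the symbol
+// writers, since both serialize into the same TYPE_SYMBOL pool.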
+static void write_symbols_with_leakp() {
+  assert(_leakp_writer != NULL, "invariant");
+  SymbolEntryWriter sw(_writer, _class_unload);
+  LeakSymbolEntryWriter lsw(_leakp_writer, _class_unload);
+  CompositeSymbolWriter csw(&lsw, &sw);
+  _artifacts->iterate_symbols(csw);
+  CStringEntryWriter ccsw(_writer, _class_unload, true); // skip header
+  LeakCStringEntryWriter lccsw(_leakp_writer, _class_unload, true); // skip header
+  CompositeCStringWriter cccsw(&lccsw, &ccsw);
+  _artifacts->iterate_cstrings(cccsw);
+  sw.add(ccsw.count());
+  lsw.add(lccsw.count());
+  _artifacts->tally(sw);
+}
 
-  // untag
-  ClearKlassAndMethods clear(class_unload);
-  artifacts->iterate_klasses(clear);
-  artifacts->clear();
+static void write_symbols() {
+  assert(_writer != NULL, "invariant");
+  if (_leakp_writer != NULL) {
+    write_symbols_with_leakp();
+    return;
+  }
+  SymbolEntryWriter sw(_writer, _class_unload);
+  _artifacts->iterate_symbols(sw);
+  CStringEntryWriter csw(_writer, _class_unload, true); // skip header
+  _artifacts->iterate_cstrings(csw);
+  sw.add(csw.count());
+  _artifacts->tally(sw);
+}
+
+typedef Wrapper<KlassPtr, ClearArtifact> ClearKlassBits;
+typedef Wrapper<MethodPtr, ClearArtifact> ClearMethodFlag;
+typedef MethodIteratorHost<ClearMethodFlag, ClearKlassBits, false> ClearKlassAndMethods;
+
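+// Returns the total number of artifacts written. Tag bits and the artifact
+// set are cleared, and the checkpoint id advanced, only when serializing the
+// previous epoch.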
+static size_t teardown() {
+  assert(_artifacts != NULL, "invariant");
+  const size_t total_count = _artifacts->total_count();
+  if (previous_epoch()) {
+    assert(_writer != NULL, "invariant");
+    ClearKlassAndMethods clear(_writer);
+    _artifacts->iterate_klasses(clear);
+    _artifacts->clear();
+    ++checkpoint_id;
+  }
+  return total_count;
+}
+
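+// The artifact set is created lazily on first use and re-initialized on
+// subsequent serializations.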
+static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload) {
+  _writer = writer;
+  _leakp_writer = leakp_writer;
+  _class_unload = class_unload;
+  if (_artifacts == NULL) {
+    _artifacts = new JfrArtifactSet(class_unload);
+  } else {
+    _artifacts->initialize(class_unload);
+  }
+  assert(_artifacts != NULL, "invariant");
+  assert(!_artifacts->has_klass_entries(), "invariant");
 }
 
 /**
  * Write all "tagged" (in-use) constant artifacts and their dependencies.
  */
-void JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload) {
+size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload) {
   assert(writer != NULL, "invariant");
   ResourceMark rm;
-  // initialization begin
-  _class_unload = class_unload;
-  ++checkpoint_id;
-  if (_artifacts == NULL) {
-    _artifacts = new JfrArtifactSet(class_unload);
-    _subsystem_callback = NULL;
-  } else {
-    _artifacts->initialize(class_unload);
-    _subsystem_callback = NULL;
-  }
-  assert(_artifacts != NULL, "invariant");
-  assert(!_artifacts->has_klass_entries(), "invariant");
-  assert(_subsystem_callback == NULL, "invariant");
-  // initialization complete
-
+  setup(writer, leakp_writer, class_unload);
   // write order is important because an individual write step
   // might tag an artifact to be written in a subsequent step
-  write_klass_constants(writer, leakp_writer);
-  if (_artifacts->has_klass_entries()) {
-    write_package_constants(writer, leakp_writer);
-    write_module_constants(writer, leakp_writer);
-    write_class_loader_constants(writer, leakp_writer);
-    write_method_constants(writer, leakp_writer);
-    write_symbol_constants(writer, leakp_writer);
-    clear_artifacts(_artifacts, class_unload);
+  if (!write_klasses()) {
+    return 0;
   }
+  write_packages();
+  write_modules();
+  write_classloaders();
+  write_methods();
+  write_symbols();
+  return teardown();
 }
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -27,47 +27,11 @@
 
 #include "jfr/utilities/jfrAllocation.hpp"
 
-class ClassLoaderData;
-class JfrArtifactClosure;
-class JfrArtifactSet;
 class JfrCheckpointWriter;
-class Klass;
-
-class ModuleEntry;
-class PackageEntry;
 
 class JfrTypeSet : AllStatic {
-  friend class CLDCallback;
-  friend class JfrTypeManager;
-  friend class TypeSetSerialization;
- private:
-  static JfrArtifactSet* _artifacts;
-  static JfrArtifactClosure* _subsystem_callback;
-  static bool _class_unload;
-
-  static void do_klass(Klass* k);
-  static void do_unloaded_klass(Klass* k);
-  static void do_klasses();
-
-  static void do_package(PackageEntry* entry);
-  static void do_unloaded_package(PackageEntry* entry);
-  static void do_packages();
-
-  static void do_module(ModuleEntry* entry);
-  static void do_unloaded_module(ModuleEntry* entry);
-  static void do_modules();
-
-  static void do_class_loader_data(ClassLoaderData* cld);
-  static void do_unloaded_class_loader_data(ClassLoaderData* cld);
-  static void do_class_loaders();
-
-  static void write_klass_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_package_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_module_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_class_loader_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_method_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void write_symbol_constants(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer);
-  static void serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload);
+ public:
+  static size_t serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload);
 };
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESET_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,15 +28,28 @@
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 
-JfrSymbolId::JfrSymbolId() : _sym_table(new SymbolTable(this)), _cstring_table(new CStringTable(this)), _symbol_id_counter(0) {
+static JfrSymbolId::CStringEntry* bootstrap = NULL;
+
+JfrSymbolId::JfrSymbolId() :
+  _sym_table(new SymbolTable(this)),
+  _cstring_table(new CStringTable(this)),
+  _sym_list(NULL),
+  _cstring_list(NULL),
+  _symbol_id_counter(1),
+  _class_unload(false) {
   assert(_sym_table != NULL, "invariant");
   assert(_cstring_table != NULL, "invariant");
-  initialize();
+  bootstrap = new CStringEntry(0, (const char*)&BOOTSTRAP_LOADER_NAME);
+  assert(bootstrap != NULL, "invariant");
+  bootstrap->set_id(1);
+  _cstring_list = bootstrap;
 }
 
-void JfrSymbolId::initialize() {
+JfrSymbolId::~JfrSymbolId() {
   clear();
-  assert(_symbol_id_counter == 0, "invariant");
+  delete _sym_table;
+  delete _cstring_table;
+  delete bootstrap;
 }
 
 void JfrSymbolId::clear() {
@@ -51,106 +64,115 @@
     _cstring_table->clear_entries();
   }
   assert(!_cstring_table->has_entries(), "invariant");
-  _symbol_id_counter = 0;
-}
 
-JfrSymbolId::~JfrSymbolId() {
-  delete _sym_table;
-  delete _cstring_table;
+  _sym_list = NULL;
+  _cstring_list = NULL;
+  _symbol_id_counter = 1;
+
+  assert(bootstrap != NULL, "invariant");
+  bootstrap->reset();
+  _cstring_list = bootstrap;
 }
 
-traceid JfrSymbolId::mark_unsafe_anonymous_klass_name(const Klass* k) {
-  assert(k != NULL, "invariant");
-  assert(k->is_instance_klass(), "invariant");
-  assert(is_unsafe_anonymous_klass(k), "invariant");
-
-  uintptr_t anonymous_symbol_hash_code = 0;
-  const char* const anonymous_symbol =
-    create_unsafe_anonymous_klass_symbol((const InstanceKlass*)k, anonymous_symbol_hash_code);
-
-  if (anonymous_symbol == NULL) {
-    return 0;
-  }
-
-  assert(anonymous_symbol_hash_code != 0, "invariant");
-  traceid symbol_id = mark(anonymous_symbol, anonymous_symbol_hash_code);
-  assert(mark(anonymous_symbol, anonymous_symbol_hash_code) == symbol_id, "invariant");
-  return symbol_id;
+void JfrSymbolId::set_class_unload(bool class_unload) {
+  _class_unload = class_unload;
 }
 
-const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(const Symbol* symbol) const {
-  return _sym_table->lookup_only(symbol, (uintptr_t)const_cast<Symbol*>(symbol)->identity_hash());
-}
-
-const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(uintptr_t hash) const {
-  return _sym_table->lookup_only(NULL, hash);
+void JfrSymbolId::on_link(const SymbolEntry* entry) {
+  assert(entry != NULL, "invariant");
+  const_cast<Symbol*>(entry->literal())->increment_refcount();
+  assert(entry->id() == 0, "invariant");
+  entry->set_id(++_symbol_id_counter);
+  entry->set_list_next(_sym_list);
+  _sym_list = entry;
 }
 
-const JfrSymbolId::CStringEntry* JfrSymbolId::map_cstring(uintptr_t hash) const {
-  return _cstring_table->lookup_only(NULL, hash);
-}
-
-void JfrSymbolId::assign_id(SymbolEntry* entry) {
-  assert(entry != NULL, "invariant");
-  assert(entry->id() == 0, "invariant");
-  entry->set_id(++_symbol_id_counter);
-}
-
-bool JfrSymbolId::equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry) {
+bool JfrSymbolId::on_equals(uintptr_t hash, const SymbolEntry* entry) {
   // query might be NULL
   assert(entry != NULL, "invariant");
   assert(entry->hash() == hash, "invariant");
   return true;
 }
 
-void JfrSymbolId::assign_id(CStringEntry* entry) {
+void JfrSymbolId::on_unlink(const SymbolEntry* entry) {
+  assert(entry != NULL, "invariant");
+  const_cast<Symbol*>(entry->literal())->decrement_refcount();
+}
+
+void JfrSymbolId::on_link(const CStringEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
   entry->set_id(++_symbol_id_counter);
+  entry->set_list_next(_cstring_list);
+  _cstring_list = entry;
 }
 
-bool JfrSymbolId::equals(const char* query, uintptr_t hash, const CStringEntry* entry) {
-  // query might be NULL
+bool JfrSymbolId::on_equals(uintptr_t hash, const CStringEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->hash() == hash, "invariant");
   return true;
 }
 
-traceid JfrSymbolId::mark(const Klass* k) {
-  assert(k != NULL, "invariant");
-  traceid symbol_id = 0;
-  if (is_unsafe_anonymous_klass(k)) {
-    symbol_id = mark_unsafe_anonymous_klass_name(k);
+void JfrSymbolId::on_unlink(const CStringEntry* entry) {
+  assert(entry != NULL, "invariant");
+  JfrCHeapObj::free(const_cast<char*>(entry->literal()), strlen(entry->literal()) + 1);
+}
+
+traceid JfrSymbolId::bootstrap_name(bool leakp) {
+  assert(bootstrap != NULL, "invariant");
+  if (leakp) {
+    bootstrap->set_leakp();
   }
-  if (0 == symbol_id) {
-    const Symbol* const sym = k->name();
-    if (sym != NULL) {
-      symbol_id = mark(sym);
-    }
-  }
-  assert(symbol_id > 0, "a symbol handler must mark the symbol for writing");
-  return symbol_id;
+  return 1;
+}
+
+traceid JfrSymbolId::mark(const Symbol* symbol, bool leakp) {
+  assert(symbol != NULL, "invariant");
+  return mark((uintptr_t)symbol->identity_hash(), symbol, leakp);
 }
 
-traceid JfrSymbolId::mark(const Symbol* symbol) {
-  assert(symbol != NULL, "invariant");
-  return mark(symbol, (uintptr_t)const_cast<Symbol*>(symbol)->identity_hash());
-}
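+// One-entry cache: marking the same symbol hash repeatedly returns the
+// previously assigned id without a table lookup; reset via reset_symbol_caches().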
+static unsigned int last_symbol_hash = 0;
+static traceid last_symbol_id = 0;
 
-traceid JfrSymbolId::mark(const Symbol* data, uintptr_t hash) {
+traceid JfrSymbolId::mark(uintptr_t hash, const Symbol* data, bool leakp) {
   assert(data != NULL, "invariant");
   assert(_sym_table != NULL, "invariant");
-  return _sym_table->id(data, hash);
+  if (hash == last_symbol_hash) {
+    assert(last_symbol_id != 0, "invariant");
+    return last_symbol_id;
+  }
+  const SymbolEntry& entry = _sym_table->lookup_put(hash, data);
+  if (_class_unload) {
+    entry.set_unloading();
+  }
+  if (leakp) {
+    entry.set_leakp();
+  }
+  last_symbol_hash = hash;
+  last_symbol_id = entry.id();
+  return last_symbol_id;
 }
 
-traceid JfrSymbolId::mark(const char* str, uintptr_t hash) {
+static unsigned int last_cstring_hash = 0;
+static traceid last_cstring_id = 0;
+
+traceid JfrSymbolId::mark(uintptr_t hash, const char* str, bool leakp) {
   assert(str != NULL, "invariant");
-  return _cstring_table->id(str, hash);
-}
-
-bool JfrSymbolId::is_unsafe_anonymous_klass(const Klass* k) {
-  assert(k != NULL, "invariant");
-  return k->is_instance_klass() && ((const InstanceKlass*)k)->is_unsafe_anonymous();
+  assert(_cstring_table != NULL, "invariant");
+  if (hash == last_cstring_hash) {
+    assert(last_cstring_id != 0, "invariant");
+    return last_cstring_id;
+  }
+  const CStringEntry& entry = _cstring_table->lookup_put(hash, str);
+  if (_class_unload) {
+    entry.set_unloading();
+  }
+  if (leakp) {
+    entry.set_leakp();
+  }
+  last_cstring_hash = hash;
+  last_cstring_id = entry.id();
+  return last_cstring_id;
 }
 
 /*
@@ -161,7 +183,7 @@
 * caller needs ResourceMark
 */
 
-uintptr_t JfrSymbolId::unsafe_anonymous_klass_name_hash_code(const InstanceKlass* ik) {
+uintptr_t JfrSymbolId::unsafe_anonymous_klass_name_hash(const InstanceKlass* ik) {
   assert(ik != NULL, "invariant");
   assert(ik->is_unsafe_anonymous(), "invariant");
   const oop mirror = ik->java_mirror_no_keepalive();
@@ -169,19 +191,18 @@
   return (uintptr_t)mirror->identity_hash();
 }
 
-const char* JfrSymbolId::create_unsafe_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t& hashcode) {
+static const char* create_unsafe_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t hash) {
   assert(ik != NULL, "invariant");
   assert(ik->is_unsafe_anonymous(), "invariant");
-  assert(0 == hashcode, "invariant");
+  assert(hash != 0, "invariant");
   char* anonymous_symbol = NULL;
   const oop mirror = ik->java_mirror_no_keepalive();
   assert(mirror != NULL, "invariant");
   char hash_buf[40];
-  hashcode = unsafe_anonymous_klass_name_hash_code(ik);
-  sprintf(hash_buf, "/" UINTX_FORMAT, hashcode);
+  sprintf(hash_buf, "/" UINTX_FORMAT, hash);
   const size_t hash_len = strlen(hash_buf);
   const size_t result_len = ik->name()->utf8_length();
-  anonymous_symbol = NEW_RESOURCE_ARRAY(char, result_len + hash_len + 1);
+  anonymous_symbol = JfrCHeapObj::new_array<char>(result_len + hash_len + 1);
   ik->name()->as_klass_external_name(anonymous_symbol, (int)result_len + 1);
   assert(strlen(anonymous_symbol) == result_len, "invariant");
   strcpy(anonymous_symbol + result_len, hash_buf);
@@ -189,70 +210,102 @@
   return anonymous_symbol;
 }
 
-uintptr_t JfrSymbolId::regular_klass_name_hash_code(const Klass* k) {
+bool JfrSymbolId::is_unsafe_anonymous_klass(const Klass* k) {
   assert(k != NULL, "invariant");
-  const Symbol* const sym = k->name();
-  assert(sym != NULL, "invariant");
-  return (uintptr_t)const_cast<Symbol*>(sym)->identity_hash();
+  return k->is_instance_klass() && ((const InstanceKlass*)k)->is_unsafe_anonymous();
+}
+
+static unsigned int last_anonymous_hash = 0;
+static traceid last_anonymous_id = 0;
+
+traceid JfrSymbolId::mark_unsafe_anonymous_klass_name(const InstanceKlass* ik, bool leakp) {
+  assert(ik != NULL, "invariant");
+  assert(ik->is_unsafe_anonymous(), "invariant");
+  const uintptr_t hash = unsafe_anonymous_klass_name_hash(ik);
+  if (hash == last_anonymous_hash) {
+    assert(last_anonymous_id != 0, "invariant");
+    return last_anonymous_id;
+  }
+  last_anonymous_hash = hash;
+  const CStringEntry* const entry = _cstring_table->lookup_only(hash);
+  last_anonymous_id = entry != NULL ? entry->id() : mark(hash, create_unsafe_anonymous_klass_symbol(ik, hash), leakp);
+  return last_anonymous_id;
+}
+
+traceid JfrSymbolId::mark(const Klass* k, bool leakp) {
+  assert(k != NULL, "invariant");
+  traceid symbol_id = 0;
+  if (is_unsafe_anonymous_klass(k)) {
+    assert(k->is_instance_klass(), "invariant");
+    symbol_id = mark_unsafe_anonymous_klass_name((const InstanceKlass*)k, leakp);
+  }
+  if (0 == symbol_id) {
+    Symbol* const sym = k->name();
+    if (sym != NULL) {
+      symbol_id = mark(sym, leakp);
+    }
+  }
+  assert(symbol_id > 0, "a symbol handler must mark the symbol for writing");
+  return symbol_id;
+}
+
+static void reset_symbol_caches() {
+  last_anonymous_hash = 0;
+  last_symbol_hash = 0;
+  last_cstring_hash = 0;
 }
 
 JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_id(new JfrSymbolId()),
-                                                    _klass_list(NULL),
-                                                    _class_unload(class_unload) {
+                                                    _klass_list(NULL),
+                                                    _total_count(0) {
   initialize(class_unload);
   assert(_klass_list != NULL, "invariant");
 }
 
 static const size_t initial_class_list_size = 200;
+
 void JfrArtifactSet::initialize(bool class_unload) {
   assert(_symbol_id != NULL, "invariant");
-  _symbol_id->initialize();
-  assert(!_symbol_id->has_entries(), "invariant");
-  _symbol_id->mark(BOOTSTRAP_LOADER_NAME, 0); // pre-load "bootstrap"
-  _class_unload = class_unload;
+  _symbol_id->set_class_unload(class_unload);
+  _total_count = 0;
   // resource allocation
   _klass_list = new GrowableArray<const Klass*>(initial_class_list_size, false, mtTracing);
 }
 
 JfrArtifactSet::~JfrArtifactSet() {
   clear();
+  delete _symbol_id;
 }
 
 void JfrArtifactSet::clear() {
+  reset_symbol_caches();
   _symbol_id->clear();
   // _klass_list will be cleared by a ResourceMark
 }
 
-traceid JfrArtifactSet::mark_unsafe_anonymous_klass_name(const Klass* klass) {
-  return _symbol_id->mark_unsafe_anonymous_klass_name(klass);
+traceid JfrArtifactSet::bootstrap_name(bool leakp) {
+  return _symbol_id->bootstrap_name(leakp);
 }
 
-traceid JfrArtifactSet::mark(const Symbol* sym, uintptr_t hash) {
-  return _symbol_id->mark(sym, hash);
-}
-
-traceid JfrArtifactSet::mark(const Klass* klass) {
-  return _symbol_id->mark(klass);
+traceid JfrArtifactSet::mark_unsafe_anonymous_klass_name(const Klass* klass, bool leakp) {
+  assert(klass->is_instance_klass(), "invariant");
+  return _symbol_id->mark_unsafe_anonymous_klass_name((const InstanceKlass*)klass, leakp);
 }
 
-traceid JfrArtifactSet::mark(const Symbol* symbol) {
-  return _symbol_id->mark(symbol);
-}
-
-traceid JfrArtifactSet::mark(const char* const str, uintptr_t hash) {
-  return _symbol_id->mark(str, hash);
+traceid JfrArtifactSet::mark(uintptr_t hash, const Symbol* sym, bool leakp) {
+  return _symbol_id->mark(hash, sym, leakp);
 }
 
-const JfrSymbolId::SymbolEntry* JfrArtifactSet::map_symbol(const Symbol* symbol) const {
-  return _symbol_id->map_symbol(symbol);
+traceid JfrArtifactSet::mark(const Klass* klass, bool leakp) {
+  return _symbol_id->mark(klass, leakp);
 }
 
-const JfrSymbolId::SymbolEntry* JfrArtifactSet::map_symbol(uintptr_t hash) const {
-  return _symbol_id->map_symbol(hash);
+traceid JfrArtifactSet::mark(const Symbol* symbol, bool leakp) {
+  return _symbol_id->mark(symbol, leakp);
 }
 
-const JfrSymbolId::CStringEntry* JfrArtifactSet::map_cstring(uintptr_t hash) const {
-  return _symbol_id->map_cstring(hash);
+traceid JfrArtifactSet::mark(uintptr_t hash, const char* const str, bool leakp) {
+  return _symbol_id->mark(hash, str, leakp);
 }
 
 bool JfrArtifactSet::has_klass_entries() const {
@@ -269,3 +322,7 @@
   assert(_klass_list->find(k) == -1, "invariant");
   _klass_list->append(k);
 }
+
+size_t JfrArtifactSet::total_count() const {
+  return _total_count;
+}
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -76,223 +76,203 @@
 };
 
 template <typename T>
-void tag_leakp_artifact(T const& value, bool class_unload) {
-  assert(value != NULL, "invariant");
-  if (class_unload) {
-    SET_LEAKP_USED_THIS_EPOCH(value);
-    assert(LEAKP_USED_THIS_EPOCH(value), "invariant");
-  } else {
-    SET_LEAKP_USED_PREV_EPOCH(value);
-    assert(LEAKP_USED_PREV_EPOCH(value), "invariant");
-  }
-}
-
-template <typename T>
-class LeakpClearArtifact {
-  bool _class_unload;
+class ClearArtifact {
  public:
-  LeakpClearArtifact(bool class_unload) : _class_unload(class_unload) {}
   bool operator()(T const& value) {
-    if (_class_unload) {
-      if (LEAKP_USED_THIS_EPOCH(value)) {
-        LEAKP_UNUSE_THIS_EPOCH(value);
-      }
-    } else {
-      if (LEAKP_USED_PREV_EPOCH(value)) {
-        LEAKP_UNUSE_PREV_EPOCH(value);
-      }
-    }
-    return true;
-  }
-};
-
-template <typename T>
-class ClearArtifact {
-  bool _class_unload;
- public:
-  ClearArtifact(bool class_unload) : _class_unload(class_unload) {}
-  bool operator()(T const& value) {
-    if (_class_unload) {
-      if (LEAKP_USED_THIS_EPOCH(value)) {
-        LEAKP_UNUSE_THIS_EPOCH(value);
-      }
-      if (USED_THIS_EPOCH(value)) {
-        UNUSE_THIS_EPOCH(value);
-      }
-      if (METHOD_USED_THIS_EPOCH(value)) {
-        UNUSE_METHOD_THIS_EPOCH(value);
-      }
-    } else {
-      if (LEAKP_USED_PREV_EPOCH(value)) {
-        LEAKP_UNUSE_PREV_EPOCH(value);
-      }
-      if (USED_PREV_EPOCH(value)) {
-        UNUSE_PREV_EPOCH(value);
-      }
-      if (METHOD_USED_PREV_EPOCH(value)) {
-        UNUSE_METHOD_PREV_EPOCH(value);
-      }
-    }
+    CLEAR_METHOD_AND_CLASS_PREV_EPOCH(value);
+    CLEAR_SERIALIZED(value);
+    assert(IS_NOT_SERIALIZED(value), "invariant");
     return true;
   }
 };
 
 template <>
 class ClearArtifact<const Method*> {
+ public:
+  bool operator()(const Method* method) {
+    assert(METHOD_FLAG_USED_PREV_EPOCH(method), "invariant");
+    CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
+    CLEAR_METHOD_SERIALIZED(method);
+    assert(METHOD_NOT_SERIALIZED(method), "invariant");
+    return true;
+  }
+};
+
+template <typename T>
+class Stub {
+ public:
+  bool operator()(T const& value) { return true; }
+};
+
+template <typename T>
+class SerializePredicate {
+  bool _class_unload;
+ public:
+  SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(T const& value) {
+    assert(value != NULL, "invariant");
+    return _class_unload ? true : IS_NOT_SERIALIZED(value);
+  }
+};
+
+template <>
+class SerializePredicate<const Method*> {
   bool _class_unload;
  public:
-  ClearArtifact(bool class_unload) : _class_unload(class_unload) {}
+  SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
   bool operator()(const Method* method) {
+    assert(method != NULL, "invariant");
+    return _class_unload ? true : METHOD_NOT_SERIALIZED(method);
+  }
+};
+
+template <typename T, bool leakp>
+class SymbolPredicate {
+  bool _class_unload;
+ public:
+  SymbolPredicate(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(T const& value) {
+    assert(value != NULL, "invariant");
     if (_class_unload) {
-      if (METHOD_FLAG_USED_THIS_EPOCH(method)) {
-        CLEAR_METHOD_FLAG_USED_THIS_EPOCH(method);
-      }
-    } else {
-      if (METHOD_FLAG_USED_PREV_EPOCH(method)) {
-        CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
-      }
+      return leakp ? value->is_leakp() : value->is_unloading();
+    }
+    return leakp ? value->is_leakp() : !value->is_serialized();
+  }
+};
+
+template <bool leakp>
+class MethodUsedPredicate {
+  bool _current_epoch;
+ public:
+  MethodUsedPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
+  bool operator()(const Klass* klass) {
+    if (_current_epoch) {
+      return leakp ? IS_LEAKP(klass) : METHOD_USED_THIS_EPOCH(klass);
     }
-    return true;
+    return leakp ? IS_LEAKP(klass) : METHOD_USED_PREV_EPOCH(klass);
+  }
+};
+
+template <bool leakp>
+class MethodFlagPredicate {
+  bool _current_epoch;
+ public:
+  MethodFlagPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
+  bool operator()(const Method* method) {
+    if (_current_epoch) {
+      return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_THIS_EPOCH(method);
+    }
+    return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_PREV_EPOCH(method);
   }
 };
 
 template <typename T>
 class LeakPredicate {
-  bool _class_unload;
  public:
-  LeakPredicate(bool class_unload) : _class_unload(class_unload) {}
+  LeakPredicate(bool class_unload) {}
   bool operator()(T const& value) {
-    return _class_unload ? LEAKP_USED_THIS_EPOCH(value) : LEAKP_USED_PREV_EPOCH(value);
+    return IS_LEAKP(value);
   }
 };
 
-template <typename T>
-class UsedPredicate {
-  bool _class_unload;
+template <>
+class LeakPredicate<const Method*> {
  public:
-  UsedPredicate(bool class_unload) : _class_unload(class_unload) {}
-  bool operator()(T const& value) {
-    return _class_unload ? USED_THIS_EPOCH(value) : USED_PREV_EPOCH(value);
+  LeakPredicate(bool class_unload) {}
+  bool operator()(const Method* method) {
+    assert(method != NULL, "invariant");
+    return IS_METHOD_LEAKP_USED(method);
   }
 };
 
-template <typename T, int compare(const T&, const T&)>
-class UniquePredicate {
- private:
-  GrowableArray<T> _seen;
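+// Hashtable entry extended with an intrusive list link and serialized /
+// unloading / leakp state bits; the fields are mutable because entries are
+// only reachable as const from the table.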
+template <typename T, typename IdType>
+class ListEntry : public JfrHashtableEntry<T, IdType> {
  public:
-  UniquePredicate(bool) : _seen() {}
-  bool operator()(T const& value) {
-    bool not_unique;
-    _seen.template find_sorted<T, compare>(value, not_unique);
-    if (not_unique) {
-      return false;
-    }
-    _seen.template insert_sorted<compare>(value);
-    return true;
+  ListEntry(uintptr_t hash, const T& data) : JfrHashtableEntry<T, IdType>(hash, data),
+    _list_next(NULL), _serialized(false), _unloading(false), _leakp(false) {}
+  const ListEntry<T, IdType>* list_next() const { return _list_next; }
+  void reset() const {
+    _list_next = NULL; _serialized = false; _unloading = false; _leakp = false;
   }
-};
-
-class MethodFlagPredicate {
-  bool _class_unload;
- public:
-  MethodFlagPredicate(bool class_unload) : _class_unload(class_unload) {}
-  bool operator()(const Method* method) {
-    return _class_unload ? METHOD_FLAG_USED_THIS_EPOCH(method) : METHOD_FLAG_USED_PREV_EPOCH(method);
-  }
-};
-
-template <bool leakp>
-class MethodUsedPredicate {
-  bool _class_unload;
- public:
-  MethodUsedPredicate(bool class_unload) : _class_unload(class_unload) {}
-  bool operator()(const Klass* klass) {
-    assert(ANY_USED(klass), "invariant");
-    if (_class_unload) {
-      return leakp ? LEAKP_METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_THIS_EPOCH(klass);
-    }
-    return leakp ? LEAKP_METHOD_USED_PREV_EPOCH(klass) : METHOD_USED_PREV_EPOCH(klass);
-  }
+  void set_list_next(const ListEntry<T, IdType>* next) const { _list_next = next; }
+  bool is_serialized() const { return _serialized; }
+  void set_serialized() const { _serialized = true; }
+  bool is_unloading() const { return _unloading; }
+  void set_unloading() const { _unloading = true; }
+  bool is_leakp() const { return _leakp; }
+  void set_leakp() const { _leakp = true; }
+ private:
+  mutable const ListEntry<T, IdType>* _list_next;
+  mutable bool _serialized;
+  mutable bool _unloading;
+  mutable bool _leakp;
 };
 
 class JfrSymbolId : public JfrCHeapObj {
   template <typename, typename, template<typename, typename> class, typename, size_t>
   friend class HashTableHost;
-  typedef HashTableHost<const Symbol*, traceid, Entry, JfrSymbolId> SymbolTable;
-  typedef HashTableHost<const char*, traceid, Entry, JfrSymbolId> CStringTable;
+  typedef HashTableHost<const Symbol*, traceid, ListEntry, JfrSymbolId> SymbolTable;
+  typedef HashTableHost<const char*, traceid, ListEntry, JfrSymbolId> CStringTable;
+  friend class JfrArtifactSet;
  public:
   typedef SymbolTable::HashEntry SymbolEntry;
   typedef CStringTable::HashEntry CStringEntry;
  private:
   SymbolTable* _sym_table;
   CStringTable* _cstring_table;
+  const SymbolEntry* _sym_list;
+  const CStringEntry* _cstring_list;
   traceid _symbol_id_counter;
+  bool _class_unload;
 
   // hashtable(s) callbacks
-  void assign_id(SymbolEntry* entry);
-  bool equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry);
-  void assign_id(CStringEntry* entry);
-  bool equals(const char* query, uintptr_t hash, const CStringEntry* entry);
+  void on_link(const SymbolEntry* entry);
+  bool on_equals(uintptr_t hash, const SymbolEntry* entry);
+  void on_unlink(const SymbolEntry* entry);
+  void on_link(const CStringEntry* entry);
+  bool on_equals(uintptr_t hash, const CStringEntry* entry);
+  void on_unlink(const CStringEntry* entry);
+
+  template <typename Functor, typename T>
+  void iterate(Functor& functor, const T* list) {
+    const T* symbol = list;
+    while (symbol != NULL) {
+      const T* next = symbol->list_next();
+      functor(symbol);
+      symbol = next;
+    }
+  }
+
+  traceid mark_unsafe_anonymous_klass_name(const InstanceKlass* k, bool leakp);
+  bool is_unsafe_anonymous_klass(const Klass* k);
+  uintptr_t unsafe_anonymous_klass_name_hash(const InstanceKlass* ik);
 
  public:
-  static bool is_unsafe_anonymous_klass(const Klass* k);
-  static const char* create_unsafe_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t& hashcode);
-  static uintptr_t unsafe_anonymous_klass_name_hash_code(const InstanceKlass* ik);
-  static uintptr_t regular_klass_name_hash_code(const Klass* k);
-
   JfrSymbolId();
   ~JfrSymbolId();
 
-  void initialize();
   void clear();
-
-  traceid mark_unsafe_anonymous_klass_name(const Klass* k);
-  traceid mark(const Symbol* sym, uintptr_t hash);
-  traceid mark(const Klass* k);
-  traceid mark(const Symbol* symbol);
-  traceid mark(const char* str, uintptr_t hash);
+  void set_class_unload(bool class_unload);
 
-  const SymbolEntry* map_symbol(const Symbol* symbol) const;
-  const SymbolEntry* map_symbol(uintptr_t hash) const;
-  const CStringEntry* map_cstring(uintptr_t hash) const;
+  traceid mark(uintptr_t hash, const Symbol* sym, bool leakp);
+  traceid mark(const Klass* k, bool leakp);
+  traceid mark(const Symbol* symbol, bool leakp);
+  traceid mark(uintptr_t hash, const char* str, bool leakp);
+  traceid bootstrap_name(bool leakp);
 
-  template <typename T>
-  void symbol(T& functor, const Klass* k) {
-    if (is_unsafe_anonymous_klass(k)) {
-      return;
-    }
-    functor(map_symbol(regular_klass_name_hash_code(k)));
+  template <typename Functor>
+  void iterate_symbols(Functor& functor) {
+    iterate(functor, _sym_list);
   }
 
-  template <typename T>
-  void symbol(T& functor, const Method* method) {
-    assert(method != NULL, "invariant");
-    functor(map_symbol((uintptr_t)method->name()->identity_hash()));
-    functor(map_symbol((uintptr_t)method->signature()->identity_hash()));
-  }
-
-  template <typename T>
-  void cstring(T& functor, const Klass* k) {
-    if (!is_unsafe_anonymous_klass(k)) {
-      return;
-    }
-    functor(map_cstring(unsafe_anonymous_klass_name_hash_code((const InstanceKlass*)k)));
-  }
-
-  template <typename T>
-  void iterate_symbols(T& functor) {
-    _sym_table->iterate_entry(functor);
-  }
-
-  template <typename T>
-  void iterate_cstrings(T& functor) {
-    _cstring_table->iterate_entry(functor);
+  template <typename Functor>
+  void iterate_cstrings(Functor& functor) {
+    iterate(functor, _cstring_list);
   }
 
   bool has_entries() const { return has_symbol_entries() || has_cstring_entries(); }
-  bool has_symbol_entries() const { return _sym_table->has_entries(); }
-  bool has_cstring_entries() const { return _cstring_table->has_entries(); }
+  bool has_symbol_entries() const { return _sym_list != NULL; }
+  bool has_cstring_entries() const { return _cstring_list != NULL; }
 };
 
 /**
@@ -313,7 +293,7 @@
  private:
   JfrSymbolId* _symbol_id;
   GrowableArray<const Klass*>* _klass_list;
-  bool _class_unload;
+  size_t _total_count;
 
  public:
   JfrArtifactSet(bool class_unload);
@@ -323,11 +303,13 @@
   void initialize(bool class_unload);
   void clear();
 
-  traceid mark(const Symbol* sym, uintptr_t hash);
-  traceid mark(const Klass* klass);
-  traceid mark(const Symbol* symbol);
-  traceid mark(const char* const str, uintptr_t hash);
-  traceid mark_unsafe_anonymous_klass_name(const Klass* klass);
+
+  traceid mark(uintptr_t hash, const Symbol* sym, bool leakp);
+  traceid mark(const Klass* klass, bool leakp);
+  traceid mark(const Symbol* symbol, bool leakp);
+  traceid mark(uintptr_t hash, const char* const str, bool leakp);
+  traceid mark_unsafe_anonymous_klass_name(const Klass* klass, bool leakp);
+  traceid bootstrap_name(bool leakp);
 
   const JfrSymbolId::SymbolEntry* map_symbol(const Symbol* symbol) const;
   const JfrSymbolId::SymbolEntry* map_symbol(uintptr_t hash) const;
@@ -335,6 +317,7 @@
 
   bool has_klass_entries() const;
   int entries() const;
+  size_t total_count() const;
   void register_klass(const Klass* k);
 
   template <typename Functor>
@@ -355,6 +338,12 @@
   void iterate_cstrings(T& functor) {
     _symbol_id->iterate_cstrings(functor);
   }
+
+  template <typename Writer>
+  void tally(Writer& writer) {
+    _total_count += writer.count();
+  }
+
 };
 
 class KlassArtifactRegistrator {
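The ListEntry bookkeeping above threads every installed hashtable entry onto a per-table singly linked list (on_link records the head, iterate walks it, has_*_entries() just tests the head), so serialization only visits entries that were actually touched instead of scanning the whole table. A minimal, self-contained sketch of that intrusive-list idiom in plain C++; the Entry, link and Print names are illustrative and not part of this changeset:

#include <cstdio>

// Each entry carries its own "next" link, mirroring ListEntry::_list_next.
struct Entry {
  int id;
  const Entry* list_next;
};

// Link a freshly installed entry onto the list head (cf. the on_link() callback).
static void link(const Entry** head, Entry* e) {
  e->list_next = *head;
  *head = e;
}

// Visit only the entries that were installed, newest first,
// mirroring JfrSymbolId::iterate(Functor&, const T*).
template <typename Functor>
static void iterate(Functor& f, const Entry* head) {
  const Entry* e = head;
  while (e != nullptr) {
    const Entry* next = e->list_next;  // read the link before invoking the functor
    f(e);
    e = next;
  }
}

struct Print {
  void operator()(const Entry* e) const { std::printf("entry %d\n", e->id); }
};

int main() {
  Entry a = {1, nullptr};
  Entry b = {2, nullptr};
  const Entry* head = nullptr;
  link(&head, &a);
  link(&head, &b);
  Print p;
  iterate(p, head);  // prints "entry 2" then "entry 1"
  return 0;
}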
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
-#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
-
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
-#include "memory/allocation.hpp"
-
-template <typename WriterImpl, u4 ID>
-class JfrArtifactWriterHost : public StackObj {
- private:
-  WriterImpl _impl;
-  JfrCheckpointWriter* _writer;
-  JfrCheckpointContext _ctx;
-  int64_t _count_offset;
-  int _count;
-  bool _skip_header;
- public:
-  JfrArtifactWriterHost(JfrCheckpointWriter* writer,
-                        JfrArtifactSet* artifacts,
-                        bool class_unload,
-                        bool skip_header = false) : _impl(writer, artifacts, class_unload),
-                                                    _writer(writer),
-                                                    _ctx(writer->context()),
-                                                    _count(0),
-                                                    _skip_header(skip_header) {
-    assert(_writer != NULL, "invariant");
-    if (!_skip_header) {
-      _writer->write_type((JfrTypeId)ID);
-      _count_offset = _writer->reserve(sizeof(u4)); // Don't know how many yet
-    }
-  }
-
-  ~JfrArtifactWriterHost() {
-    if (_count == 0) {
-      // nothing written, restore context for rewind
-      _writer->set_context(_ctx);
-      return;
-    }
-    assert(_count > 0, "invariant");
-    if (!_skip_header) {
-      _writer->write_count(_count, _count_offset);
-    }
-  }
-
-  bool operator()(typename WriterImpl::Type const & value) {
-    this->_count += _impl(value);
-    return true;
-  }
-
-  int count() const   { return _count; }
-  void add(int count) { _count += count; }
-};
-
-typedef int(*artifact_write_operation)(JfrCheckpointWriter*, JfrArtifactSet*, const void*);
-
-template <typename T, artifact_write_operation op>
-class JfrArtifactWriterImplHost {
- private:
-  JfrCheckpointWriter* _writer;
-  JfrArtifactSet* _artifacts;
-  bool _class_unload;
- public:
-  typedef T Type;
-  JfrArtifactWriterImplHost(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, bool class_unload) :
-    _writer(writer), _artifacts(artifacts), _class_unload(class_unload) {}
-  int operator()(T const& value) {
-    return op(this->_writer, this->_artifacts, value);
-  }
-};
-
-template <typename T, typename Predicate, artifact_write_operation op>
-class JfrPredicatedArtifactWriterImplHost : public JfrArtifactWriterImplHost<T, op> {
- private:
-  Predicate _predicate;
-  typedef JfrArtifactWriterImplHost<T, op> Parent;
- public:
-  JfrPredicatedArtifactWriterImplHost(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, bool class_unload) :
-    Parent(writer, artifacts, class_unload), _predicate(class_unload) {}
-  int operator()(T const& value) {
-    return _predicate(value) ? Parent::operator()(value) : 0;
-  }
-};
-
-#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESETWRITER_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -159,7 +159,7 @@
   // This mechanism will retain the event specific flags
   // in the archive, allowing for event flag restoration
   // when renewing the traceid on klass revival.
-  k->set_trace_id(EVENT_FLAGS_MASK(k));
+  k->set_trace_id(EVENT_KLASS_MASK(k));
 }
 
 // used by CDS / APPCDS as part of "restore_unshareable_info"
@@ -181,12 +181,12 @@
   return get(java_lang_Class::as_Klass(my_oop));
 }
 
-traceid JfrTraceId::use(jclass jc, bool leakp /* false */) {
+traceid JfrTraceId::use(jclass jc) {
   assert(jc != NULL, "invariant");
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
   const oop my_oop = JNIHandles::resolve(jc);
   assert(my_oop != NULL, "invariant");
-  return use(java_lang_Class::as_Klass(my_oop), leakp);
+  return use(java_lang_Class::as_Klass(my_oop));
 }
 
 bool JfrTraceId::in_visible_set(const jclass jc) {
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -90,12 +90,16 @@
   static traceid get(const Thread* thread);
 
   // tag construct as used, returns pre-tagged traceid
-  static traceid use(const Klass* klass, bool leakp = false);
-  static traceid use(jclass jc, bool leakp = false);
-  static traceid use(const Method* method, bool leakp = false);
-  static traceid use(const ModuleEntry* module, bool leakp = false);
-  static traceid use(const PackageEntry* package, bool leakp = false);
-  static traceid use(const ClassLoaderData* cld, bool leakp = false);
+  static traceid use(const Klass* klass);
+  static traceid use(jclass jc);
+  static traceid use(const Method* method);
+  static traceid use(const Klass* klass, const Method* method);
+  static traceid use(const ModuleEntry* module);
+  static traceid use(const PackageEntry* package);
+  static traceid use(const ClassLoaderData* cld);
+
+  // leak profiler
+  static void set_leakp(const Method* method);
 
   static void remove(const Klass* klass);
   static void restore(const Klass* klass);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -28,8 +28,11 @@
 #include "classfile/classLoaderData.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp"
+#include "jfr/support/jfrKlassExtension.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/klass.hpp"
 #include "oops/instanceKlass.hpp"
@@ -38,21 +41,11 @@
 #include "utilities/debug.hpp"
 
 template <typename T>
-inline traceid set_used_and_get(const T* type, bool leakp) {
+inline traceid set_used_and_get(const T* type) {
   assert(type != NULL, "invariant");
-  if (leakp) {
-    SET_LEAKP_USED_THIS_EPOCH(type);
-    assert(LEAKP_USED_THIS_EPOCH(type), "invariant");
-  }
   SET_USED_THIS_EPOCH(type);
   assert(USED_THIS_EPOCH(type), "invariant");
-  return TRACE_ID_MASKED_PTR(type);
-}
-
-template <typename T>
-inline traceid set_used_and_get_shifted(const T* type, bool leakp) {
-  assert(type != NULL, "invariant");
-  return set_used_and_get(type, leakp) >> TRACE_ID_SHIFT;
+  return TRACE_ID(type);
 }
 
 inline traceid JfrTraceId::get(const Klass* klass) {
@@ -65,38 +58,49 @@
   return TRACE_ID_RAW(t->jfr_thread_local());
 }
 
-inline traceid JfrTraceId::use(const Klass* klass, bool leakp /* false */) {
+inline traceid JfrTraceId::use(const Klass* klass) {
   assert(klass != NULL, "invariant");
-  return set_used_and_get_shifted(klass, leakp);
+  return set_used_and_get(klass);
 }
 
-inline traceid JfrTraceId::use(const Method* method, bool leakp /* false */) {
+inline traceid JfrTraceId::use(const Method* method) {
+  assert(method != NULL, "invariant");
+  return use(method->method_holder(), method);
+}
+
+inline traceid JfrTraceId::use(const Klass* klass, const Method* method) {
+  assert(klass != NULL, "invariant");
   assert(method != NULL, "invariant");
   SET_METHOD_FLAG_USED_THIS_EPOCH(method);
-  const Klass* const klass = method->method_holder();
-  assert(klass != NULL, "invariant");
-  if (leakp) {
-    SET_LEAKP_USED_THIS_EPOCH(klass);
-    assert(LEAKP_USED_THIS_EPOCH(klass), "invariant");
-  }
+
   SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
   assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
   return (METHOD_ID(klass, method));
 }
 
-inline traceid JfrTraceId::use(const ModuleEntry* module, bool leakp /* false */) {
+inline traceid JfrTraceId::use(const ModuleEntry* module) {
   assert(module != NULL, "invariant");
-  return set_used_and_get_shifted(module, leakp);
+  return set_used_and_get(module);
+}
+
+inline traceid JfrTraceId::use(const PackageEntry* package) {
+  assert(package != NULL, "invariant");
+  return set_used_and_get(package);
 }
 
-inline traceid JfrTraceId::use(const PackageEntry* package, bool leakp /* false */) {
-  assert(package != NULL, "invariant");
-  return set_used_and_get_shifted(package, leakp);
+inline traceid JfrTraceId::use(const ClassLoaderData* cld) {
+  assert(cld != NULL, "invariant");
+  return cld->is_unsafe_anonymous() ? 0 : set_used_and_get(cld);
 }
 
-inline traceid JfrTraceId::use(const ClassLoaderData* cld, bool leakp /* false */) {
-  assert(cld != NULL, "invariant");
-  return cld->is_unsafe_anonymous() ? 0 : set_used_and_get_shifted(cld, leakp);
+inline void JfrTraceId::set_leakp(const Method* method) {
+  assert(method != NULL, "invariant");
+  const Klass* const klass = method->method_holder();
+  assert(klass != NULL, "invariant");
+  assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
+  assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
+  SET_LEAKP(klass);
+  SET_METHOD_LEAKP(method);
 }
 
 inline bool JfrTraceId::in_visible_set(const Klass* klass) {
@@ -112,7 +116,7 @@
 
 inline void JfrTraceId::tag_as_jdk_jfr_event(const Klass* klass) {
   assert(klass != NULL, "invariant");
-  SET_TAG(klass, JDK_JFR_EVENT_KLASS);
+  SET_JDK_JFR_EVENT_KLASS(klass);
   assert(IS_JDK_JFR_EVENT_KLASS(klass), "invariant");
 }
 
@@ -124,7 +128,7 @@
 inline void JfrTraceId::tag_as_jdk_jfr_event_sub(const Klass* k) {
   assert(k != NULL, "invariant");
   if (IS_NOT_AN_EVENT_SUB_KLASS(k)) {
-    SET_TAG(k, JDK_JFR_EVENT_SUBKLASS);
+    SET_JDK_JFR_EVENT_SUBKLASS(k);
   }
   assert(IS_JDK_JFR_EVENT_SUBKLASS(k), "invariant");
 }
@@ -145,7 +149,7 @@
 
 inline void JfrTraceId::tag_as_event_host(const Klass* k) {
   assert(k != NULL, "invariant");
-  SET_TAG(k, EVENT_HOST_KLASS);
+  SET_EVENT_HOST_KLASS(k);
   assert(IS_EVENT_HOST_KLASS(k), "invariant");
 }
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -27,20 +27,22 @@
 
 #include "jfr/utilities/jfrTypes.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/macros.hpp"
 
 #ifdef VM_LITTLE_ENDIAN
 static const int low_offset = 0;
-static const int leakp_offset = low_offset + 1;
+static const int meta_offset = low_offset + 1;
 #else
 static const int low_offset = 7;
-static const int leakp_offset = low_offset - 1;
+static const int meta_offset = low_offset - 1;
 #endif
 
 inline void set_bits(jbyte bits, jbyte* const dest) {
   assert(dest != NULL, "invariant");
   if (bits != (*dest & bits)) {
     *dest |= bits;
+    OrderAccess::storestore();
   }
 }
 
@@ -92,16 +94,28 @@
   set_mask(mask, ((jbyte*)dest) + low_offset);
 }
 
-inline void set_leakp_traceid_bits(jbyte bits, traceid* dest) {
-  set_bits(bits, ((jbyte*)dest) + leakp_offset);
+inline void set_meta_bits(jbyte bits, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  *dest |= bits;
+}
+
+inline void set_traceid_meta_bits(jbyte bits, traceid* dest) {
+  set_meta_bits(bits, ((jbyte*)dest) + meta_offset);
 }
 
-inline void set_leakp_traceid_bits_cas(jbyte bits, traceid* dest) {
-  set_bits_cas(bits, ((jbyte*)dest) + leakp_offset);
+inline void set_meta_mask(jbyte mask, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  *dest &= mask;
 }
 
-inline void set_leakp_traceid_mask(jbyte mask, traceid* dest) {
-  set_mask(mask, ((jbyte*)dest) + leakp_offset);
+inline void set_traceid_meta_mask(jbyte mask, traceid* dest) {
+  set_meta_mask(mask, ((jbyte*)dest) + meta_offset);
+}
+
+// only used by a single thread with no visibility requirements
+inline void clear_meta_bits(jbyte bits, jbyte* const dest) {
+  assert(dest != NULL, "invariant");
+  *dest ^= bits;
 }
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
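With the dedicated leakp byte gone, set_traceid_meta_bits and set_traceid_meta_mask address a single "meta" byte inside the 64-bit traceid, selected by meta_offset next to the low epoch byte. A standalone sketch of that byte-addressing idea, assuming a little-endian host and using uint64_t/uint8_t in place of traceid/jbyte; the constants are illustrative stand-ins, not the real JFR bit values:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the USED/LEAKP meta values; not the JFR constants.
static const uint8_t kUsedBit      = 0x01;
static const uint8_t kLeakpMetaBit = 0x01;

// Byte indices inside the 64-bit id, matching low_offset/meta_offset on a
// little-endian host: the low (epoch) byte is byte 0, the meta byte is byte 1.
static const int kLowOffset  = 0;
static const int kMetaOffset = 1;

static void set_byte_bits(uint8_t bits, uint8_t* dest) {
  *dest |= bits;  // the real set_bits() also issues a storestore barrier after tagging
}

int main() {
  uint64_t traceid = 42ULL << 16;  // the id proper lives above TRACE_ID_SHIFT (16)
  uint8_t* bytes = reinterpret_cast<uint8_t*>(&traceid);
  set_byte_bits(kUsedBit,      bytes + kLowOffset);   // tag "used this epoch"
  set_byte_bits(kLeakpMetaBit, bytes + kMetaOffset);  // tag a meta bit (e.g. leakp)
  std::printf("traceid word: 0x%llx\n", (unsigned long long)traceid);
  return 0;
}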
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -32,12 +32,8 @@
 #define METHOD_USED_BIT (USED_BIT << 2)
 #define EPOCH_1_SHIFT 0
 #define EPOCH_2_SHIFT 1
-#define LEAKP_SHIFT 8
-
 #define USED_EPOCH_1_BIT (USED_BIT << EPOCH_1_SHIFT)
 #define USED_EPOCH_2_BIT (USED_BIT << EPOCH_2_SHIFT)
-#define LEAKP_USED_EPOCH_1_BIT (USED_EPOCH_1_BIT << LEAKP_SHIFT)
-#define LEAKP_USED_EPOCH_2_BIT (USED_EPOCH_2_BIT << LEAKP_SHIFT)
 #define METHOD_USED_EPOCH_1_BIT (METHOD_USED_BIT << EPOCH_1_SHIFT)
 #define METHOD_USED_EPOCH_2_BIT (METHOD_USED_BIT << EPOCH_2_SHIFT)
 #define METHOD_AND_CLASS_IN_USE_BITS (METHOD_USED_BIT | USED_BIT)
@@ -75,14 +71,6 @@
     return _epoch_state ? USED_EPOCH_1_BIT : USED_EPOCH_2_BIT;
   }
 
-  static traceid leakp_in_use_this_epoch_bit() {
-    return _epoch_state ? LEAKP_USED_EPOCH_2_BIT : LEAKP_USED_EPOCH_1_BIT;
-  }
-
-  static traceid leakp_in_use_prev_epoch_bit() {
-    return _epoch_state ? LEAKP_USED_EPOCH_1_BIT : LEAKP_USED_EPOCH_2_BIT;
-  }
-
   static traceid method_in_use_this_epoch_bit() {
     return _epoch_state ? METHOD_USED_EPOCH_2_BIT : METHOD_USED_EPOCH_1_BIT;
   }
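The epoch machinery hands out one of two alternating "used" bits, so threads can keep tagging constructs for the current epoch while the previous epoch's bits are read and cleared. A small sketch of that double-buffering with an illustrative epoch-to-bit mapping (which concrete bit means "this" versus "previous" epoch is internal to JfrTraceIdEpoch):

#include <cstdint>
#include <cstdio>

// Illustrative constants modeled on USED_BIT and the epoch shifts; not the JFR macros.
static const uint64_t kUsedEpoch1 = 1u << 0;
static const uint64_t kUsedEpoch2 = 1u << 1;

static bool g_epoch_state = false;  // flips on every chunk rotation

static uint64_t in_use_this_epoch_bit() {
  return g_epoch_state ? kUsedEpoch2 : kUsedEpoch1;
}

static uint64_t in_use_prev_epoch_bit() {
  return g_epoch_state ? kUsedEpoch1 : kUsedEpoch2;
}

int main() {
  uint64_t id = 0;
  id |= in_use_this_epoch_bit();       // tagged while events are being emitted
  g_epoch_state = !g_epoch_state;      // epoch rotates at a safepoint
  const bool used_last_epoch = (id & in_use_prev_epoch_bit()) != 0;
  std::printf("tagged in previous epoch: %s\n", used_last_epoch ? "yes" : "no");
  return 0;
}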
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -25,163 +25,117 @@
 #ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
 #define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
 
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
-#include "jfr/support/jfrKlassExtension.hpp"
-#include "utilities/globalDefinitions.hpp"
-
 /**
  *
  * If a traceid is used, depending on epoch, either the first or the second bit is tagged.
  * If a class member (method) is used, either the third or fourth bit is tagged.
  * Which bit to set is a function of the epoch. This allows for concurrent tagging.
  *
- * LeakProfiler subsystem gets its own byte and uses the same tagging scheme but is shifted up 8.
- *
- * We also tag the individual method by using the TraceFlag field,
+ * We also tag individual methods by using the _trace_flags field,
  * (see jfr/support/jfrTraceIdExtension.hpp for details)
  *
  */
 
-// these are defined in jfr/support/jfrKlassExtension.hpp
+// the following are defined in jfr/support/jfrKlassExtension.hpp
 //
-// #define JDK_JFR_EVENT_SUBKLASS  16
-// #define JDK_JFR_EVENT_KLASS     32
-// #define EVENT_HOST_KLASS        64
-
-#define IS_JDK_JFR_EVENT_SUBKLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_SUBKLASS)) != 0)
+// #define JDK_JFR_EVENT_SUBKLASS                 16
+// #define JDK_JFR_EVENT_KLASS                    32
+// #define EVENT_HOST_KLASS                       64
 
-#define ANY_USED_BITS (USED_EPOCH_2_BIT         | \
-                       USED_EPOCH_1_BIT         | \
-                       METHOD_USED_EPOCH_2_BIT  | \
-                       METHOD_USED_EPOCH_1_BIT  | \
-                       LEAKP_USED_EPOCH_2_BIT   | \
-                       LEAKP_USED_EPOCH_1_BIT)
-
-#define TRACE_ID_META_BITS (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS | ANY_USED_BITS)
-
-#define ANY_EVENT                       (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
-#define IS_JDK_JFR_EVENT_KLASS(ptr)     (((ptr)->trace_id() & JDK_JFR_EVENT_KLASS) != 0)
-#define IS_EVENT_HOST_KLASS(ptr)        (((ptr)->trace_id() & EVENT_HOST_KLASS) != 0)
-#define IS_NOT_AN_EVENT_KLASS(ptr)      (!IS_EVENT_KLASS(ptr))
-#define IS_NOT_AN_EVENT_SUB_KLASS(ptr)  (!IS_JDK_JFR_EVENT_SUBKLASS(ptr))
-#define IS_NOT_JDK_JFR_EVENT_KLASS(ptr) (!IS_JDK_JFR_EVENT_KLASS(ptr))
-#define EVENT_FLAGS_MASK(ptr)           (((ptr)->trace_id() & ANY_EVENT) != 0)
-#define UNEVENT(ptr)                    ((ptr)->set_trace_id(((ptr)->trace_id()) & ~ANY_EVENT))
-
-#define TRACE_ID_SHIFT 16
+// static bits
+#define META_SHIFT                                8
+#define LEAKP_META_BIT                            USED_BIT
+#define LEAKP_BIT                                 (LEAKP_META_BIT << META_SHIFT)
+#define TRANSIENT_META_BIT                        (USED_BIT << 1)
+#define TRANSIENT_BIT                             (TRANSIENT_META_BIT << META_SHIFT)
+#define SERIALIZED_META_BIT                       (USED_BIT << 2)
+#define SERIALIZED_BIT                            (SERIALIZED_META_BIT << META_SHIFT)
+#define TRACE_ID_SHIFT                            16
+#define METHOD_ID_NUM_MASK                        ((1 << TRACE_ID_SHIFT) - 1)
+#define META_BITS                                 (SERIALIZED_BIT | TRANSIENT_BIT | LEAKP_BIT)
+#define EVENT_BITS                                (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
+#define USED_BITS                                 (METHOD_USED_EPOCH_2_BIT | METHOD_USED_EPOCH_1_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)
+#define ALL_BITS                                  (META_BITS | EVENT_BITS | USED_BITS)
+#define ALL_BITS_MASK                             (~(ALL_BITS))
 
-#define TRACE_ID_MASKED(id)             (id & ~TRACE_ID_META_BITS)
-#define TRACE_ID_VALUE(id)              (TRACE_ID_MASKED(id) >> TRACE_ID_SHIFT)
-#define TRACE_ID_MASKED_PTR(ptr)        (TRACE_ID_MASKED((ptr)->trace_id()))
-#define TRACE_ID_RAW(ptr)               ((ptr)->trace_id())
-#define TRACE_ID(ptr)                   (TRACE_ID_MASKED_PTR(ptr) >> TRACE_ID_SHIFT)
-#define METHOD_ID(kls, meth)            (TRACE_ID_MASKED_PTR(kls) | (meth)->method_idnum())
-#define SET_TAG(ptr, tag)               (set_traceid_bits(tag, (ptr)->trace_id_addr()))
-#define SET_LEAKP_TAG(ptr, tag)         (set_leakp_traceid_bits(tag, (ptr)->trace_id_addr()))
-#define SET_TAG_CAS(ptr, tag)           (set_traceid_bits_cas(tag, (ptr)->trace_id_addr()))
-#define SET_LEAKP_TAG_CAS(ptr, tag)     (set_leakp_traceid_bits_cas(tag, (ptr)->trace_id_addr()))
-
-#define IN_USE_THIS_EPOCH_BIT           (JfrTraceIdEpoch::in_use_this_epoch_bit())
-#define IN_USE_PREV_EPOCH_BIT           (JfrTraceIdEpoch::in_use_prev_epoch_bit())
-#define LEAKP_IN_USE_THIS_EPOCH_BIT     (JfrTraceIdEpoch::leakp_in_use_this_epoch_bit())
-#define LEAKP_IN_USE_PREV_EPOCH_BIT     (JfrTraceIdEpoch::leakp_in_use_prev_epoch_bit())
+// epoch relative bits
+#define IN_USE_THIS_EPOCH_BIT                     (JfrTraceIdEpoch::in_use_this_epoch_bit())
+#define IN_USE_PREV_EPOCH_BIT                     (JfrTraceIdEpoch::in_use_prev_epoch_bit())
+#define METHOD_IN_USE_THIS_EPOCH_BIT              (JfrTraceIdEpoch::method_in_use_this_epoch_bit())
+#define METHOD_IN_USE_PREV_EPOCH_BIT              (JfrTraceIdEpoch::method_in_use_prev_epoch_bit())
+#define METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS   (JfrTraceIdEpoch::method_and_class_in_use_this_epoch_bits())
+#define METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS   (JfrTraceIdEpoch::method_and_class_in_use_prev_epoch_bits())
+#define METHOD_FLAG_IN_USE_THIS_EPOCH_BIT         ((jbyte)IN_USE_THIS_EPOCH_BIT)
+#define METHOD_FLAG_IN_USE_PREV_EPOCH_BIT         ((jbyte)IN_USE_PREV_EPOCH_BIT)
 
-#define METHOD_IN_USE_THIS_EPOCH_BIT    (JfrTraceIdEpoch::method_in_use_this_epoch_bit())
-#define METHOD_IN_USE_PREV_EPOCH_BIT    (JfrTraceIdEpoch::method_in_use_prev_epoch_bit())
-#define METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_this_epoch_bits())
-#define METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_prev_epoch_bits())
-
-#define UNUSE_THIS_EPOCH_MASK           (~(IN_USE_THIS_EPOCH_BIT))
-#define UNUSE_PREV_EPOCH_MASK           (~(IN_USE_PREV_EPOCH_BIT))
-#define LEAKP_UNUSE_THIS_EPOCH_MASK     UNUSE_THIS_EPOCH_MASK
-#define LEAKP_UNUSE_PREV_EPOCH_MASK     UNUSE_PREV_EPOCH_MASK
-
-#define UNUSE_METHOD_THIS_EPOCH_MASK    (~(METHOD_IN_USE_THIS_EPOCH_BIT))
-#define UNUSE_METHOD_PREV_EPOCH_MASK    (~(METHOD_IN_USE_PREV_EPOCH_BIT))
-#define LEAKP_UNUSE_METHOD_THIS_EPOCH_MASK (~(UNUSE_METHOD_THIS_EPOCH_MASK))
-#define LEAKP_UNUSE_METHOD_PREV_EPOCH_MASK (~UNUSE_METHOD_PREV_EPOCH_MASK))
-
-#define UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK (~(METHOD_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT))
-#define UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK (~(METHOD_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT))
-
-#define SET_USED_THIS_EPOCH(ptr)        (SET_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
-#define SET_USED_PREV_EPOCH(ptr)        (SET_TAG_CAS(ptr, IN_USE_PREV_EPOCH_BIT))
-#define SET_LEAKP_USED_THIS_EPOCH(ptr)  (SET_LEAKP_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
-#define SET_LEAKP_USED_PREV_EPOCH(ptr)  (SET_LEAKP_TAG_CAS(ptr, IN_USE_PREV_EPOCH_BIT))
-#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (SET_TAG(kls, METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS))
+// operators
+#define TRACE_ID_RAW(ptr)                         ((ptr)->trace_id())
+#define TRACE_ID(ptr)                             (TRACE_ID_RAW(ptr) >> TRACE_ID_SHIFT)
+#define TRACE_ID_MASKED(ptr)                      (TRACE_ID_RAW(ptr) & ALL_BITS_MASK)
+#define TRACE_ID_PREDICATE(ptr, bits)             ((TRACE_ID_RAW(ptr) & bits) != 0)
+#define TRACE_ID_TAG(ptr, bits)                   (set_traceid_bits(bits, (ptr)->trace_id_addr()))
+#define TRACE_ID_TAG_CAS(ptr, bits)               (set_traceid_bits_cas(bits, (ptr)->trace_id_addr()))
+#define TRACE_ID_CLEAR(ptr, bits)                 (set_traceid_mask(bits, (ptr)->trace_id_addr()))
+#define TRACE_ID_META_TAG(ptr, bits)              (set_traceid_meta_bits(bits, (ptr)->trace_id_addr()))
+#define TRACE_ID_META_CLEAR(ptr, bits)            (set_traceid_meta_mask(bits, (ptr)->trace_id_addr()))
+#define METHOD_ID(kls, method)                    (TRACE_ID_MASKED(kls) | (method)->orig_method_idnum())
+#define METHOD_FLAG_PREDICATE(method, bits)       ((method)->is_trace_flag_set(bits))
+#define METHOD_FLAG_TAG(method, bits)             (set_bits(bits, (method)->trace_flags_addr()))
+#define METHOD_META_TAG(method, bits)             (set_meta_bits(bits, (method)->trace_meta_addr()))
+#define METHOD_FLAG_CLEAR(method, bits)           (clear_bits_cas(bits, (method)->trace_flags_addr()))
+#define METHOD_META_CLEAR(method, bits)           (set_meta_mask(bits, (method)->trace_meta_addr()))
 
-#define USED_THIS_EPOCH(ptr)            (((ptr)->trace_id() & IN_USE_THIS_EPOCH_BIT) != 0)
-#define NOT_USED_THIS_EPOCH(ptr)        (!USED_THIS_EPOCH(ptr))
-#define USED_PREV_EPOCH(ptr)            (((ptr)->trace_id() & IN_USE_PREV_EPOCH_BIT) != 0)
-#define NOT_USED_PREV_EPOCH(ptr)        (!USED_PREV_EPOCH(ptr))
-#define USED_ANY_EPOCH(ptr)             (((ptr)->trace_id() & (USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)) != 0)
-#define NOT_USED_ANY_EPOCH(ptr)         (!USED_ANY_EPOCH(ptr))
-
-#define LEAKP_USED_THIS_EPOCH(ptr)      (((ptr)->trace_id() & LEAKP_IN_USE_THIS_EPOCH_BIT) != 0)
-#define LEAKP_NOT_USED_THIS_EPOCH(ptr)  (!LEAKP_USED_THIS_EPOCH(ptr))
-#define LEAKP_USED_PREV_EPOCH(ptr)      (((ptr)->trace_id() & LEAKP_IN_USE_PREV_EPOCH_BIT) != 0)
-#define LEAKP_NOT_USED_PREV_EPOCH(ptr)  (!LEAKP_USED_PREV_EPOCH(ptr))
-#define LEAKP_USED_ANY_EPOCH(ptr)       (((ptr)->trace_id() & (LEAKP_USED_EPOCH_2_BIT | LEAKP_USED_EPOCH_1_BIT)) != 0)
-#define LEAKP_NOT_USED_ANY_EPOCH(ptr)   (!LEAKP_USED_ANY_EPOCH(ptr))
+// predicates
+#define USED_THIS_EPOCH(ptr)                      (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_THIS_EPOCH_BIT)))
+#define NOT_USED_THIS_EPOCH(ptr)                  (!(USED_THIS_EPOCH(ptr)))
+#define USED_PREV_EPOCH(ptr)                      (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_PREV_EPOCH_BIT)))
+#define USED_ANY_EPOCH(ptr)                       (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)))
+#define METHOD_USED_THIS_EPOCH(kls)               (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_NOT_USED_THIS_EPOCH(kls)           (!(METHOD_USED_THIS_EPOCH(kls)))
+#define METHOD_USED_PREV_EPOCH(kls)               (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_PREV_EPOCH_BIT)))
+#define METHOD_USED_ANY_EPOCH(kls)                (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls)     (TRACE_ID_PREDICATE(kls, (METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS)))
+#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls)     (TRACE_ID_PREDICATE(kls, (METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS)))
+#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls)      (METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls))
+#define METHOD_FLAG_USED_THIS_EPOCH(method)       (METHOD_FLAG_PREDICATE(method, (METHOD_FLAG_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_FLAG_NOT_USED_THIS_EPOCH(method)   (!(METHOD_FLAG_USED_THIS_EPOCH(method)))
+#define METHOD_FLAG_USED_PREV_EPOCH(method)       (METHOD_FLAG_PREDICATE(method, (METHOD_FLAG_IN_USE_PREV_EPOCH_BIT)))
 
-#define ANY_USED_THIS_EPOCH(ptr)        (((ptr)->trace_id() & (LEAKP_IN_USE_THIS_EPOCH_BIT | IN_USE_THIS_EPOCH_BIT)) != 0)
-#define ANY_NOT_USED_THIS_EPOCH(ptr)    (!ANY_USED_THIS_EPOCH(ptr))
-#define ANY_USED_PREV_EPOCH(ptr)        (((ptr)->trace_id() & (LEAKP_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT)) != 0)
-#define ANY_NOT_USED_PREV_EPOCH(ptr)    (!ANY_USED_PREV_EPOCH(ptr))
-
-#define METHOD_USED_THIS_EPOCH(kls)     (((kls)->trace_id() & METHOD_IN_USE_THIS_EPOCH_BIT) != 0)
-#define METHOD_NOT_USED_THIS_EPOCH(kls) (!METHOD_USED_THIS_EPOCH(kls))
-#define METHOD_USED_PREV_EPOCH(kls)     (((kls)->trace_id() & METHOD_IN_USE_PREV_EPOCH_BIT) != 0)
-#define METHOD_NOT_USED_PREV_EPOCH(kls) (!METHOD_USED_PREV_EPOCH(kls))
-#define METHOD_USED_ANY_EPOCH(kls)      (((kls)->trace_id() & (METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)) != 0)
-
-#define METHOD_NOT_USED_ANY_EPOCH(kls)  (!METHOD_USED_ANY_EPOCH(kls))
-
-#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls) ((((kls)->trace_id() & METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS) == \
-                                                                     METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS) != 0)
-
-#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls) ((((kls)->trace_id() & METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS) == \
-                                                                     METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS) != 0)
-
-#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls)     ((METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls)) != 0)
-#define METHOD_AND_CLASS_NOT_USED_ANY_EPOCH(kls) (!METHOD_AND_CLASS_USED_ANY_EPOCH(kls))
+// setters
+#define SET_USED_THIS_EPOCH(ptr)                  (TRACE_ID_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
+#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_TAG(kls, METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS))
+#define SET_METHOD_FLAG_USED_THIS_EPOCH(method)   (METHOD_FLAG_TAG(method, METHOD_FLAG_IN_USE_THIS_EPOCH_BIT))
+#define CLEAR_METHOD_AND_CLASS_PREV_EPOCH_MASK    (~(METHOD_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT))
+#define CLEAR_METHOD_AND_CLASS_PREV_EPOCH(kls)    (TRACE_ID_CLEAR(kls, CLEAR_METHOD_AND_CLASS_PREV_EPOCH_MASK))
+#define CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method) (METHOD_FLAG_CLEAR(method, METHOD_FLAG_IN_USE_PREV_EPOCH_BIT))
 
-#define LEAKP_METHOD_IN_USE_THIS_EPOCH  (LEAKP_IN_USE_THIS_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)
-#define LEAKP_METHOD_IN_USE_PREV_EPOCH  (LEAKP_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_PREV_EPOCH_BIT)
-#define LEAKP_METHOD_USED_THIS_EPOCH(ptr)  ((((ptr)->trace_id() & LEAKP_METHOD_IN_USE_THIS_EPOCH) == \
-                                                                  LEAKP_METHOD_IN_USE_THIS_EPOCH) != 0)
-#define LEAKP_METHOD_NOT_USED_THIS_EPOCH(kls) (!LEAKP_METHOD_USED_THIS_EPOCH(kls))
-#define LEAKP_METHOD_USED_PREV_EPOCH(ptr)  ((((ptr)->trace_id() & LEAKP_METHOD_IN_USE_PREV_EPOCH) == \
-                                                                  LEAKP_METHOD_IN_USE_PREV_EPOCH) != 0)
-#define LEAKP_METHOD_NOT_USED_PREV_EPOCH(kls) (!LEAKP_METHOD_USED_PREV_EPOCH(kls))
-
-#define UNUSE_THIS_EPOCH(ptr)           (set_traceid_mask(UNUSE_THIS_EPOCH_MASK, (ptr)->trace_id_addr()))
-#define UNUSE_PREV_EPOCH(ptr)           (set_traceid_mask(UNUSE_PREV_EPOCH_MASK, (ptr)->trace_id_addr()))
-#define UNUSE_METHOD_THIS_EPOCH(kls)    (set_traceid_mask(UNUSE_METHOD_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
-#define UNUSE_METHOD_PREV_EPOCH(kls)    (set_traceid_mask(UNUSE_METHOD_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
+// types
+#define IS_JDK_JFR_EVENT_KLASS(kls)               (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_KLASS))
+#define IS_JDK_JFR_EVENT_SUBKLASS(kls)            (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_SUBKLASS))
+#define IS_NOT_AN_EVENT_SUB_KLASS(kls)            (!(IS_JDK_JFR_EVENT_SUBKLASS(kls)))
+#define IS_EVENT_HOST_KLASS(kls)                  (TRACE_ID_PREDICATE(kls, EVENT_HOST_KLASS))
+#define SET_JDK_JFR_EVENT_KLASS(kls)              (TRACE_ID_TAG(kls, JDK_JFR_EVENT_KLASS))
+#define SET_JDK_JFR_EVENT_SUBKLASS(kls)           (TRACE_ID_TAG(kls, JDK_JFR_EVENT_SUBKLASS))
+#define SET_EVENT_HOST_KLASS(kls)                 (TRACE_ID_TAG(kls, EVENT_HOST_KLASS))
+#define EVENT_KLASS_MASK(kls)                     (TRACE_ID_RAW(kls) & EVENT_BITS)
 
-#define LEAKP_UNUSE_THIS_EPOCH(ptr)     (set_leakp_traceid_mask(UNUSE_THIS_EPOCH_MASK, (ptr)->trace_id_addr()))
-#define LEAKP_UNUSE_PREV_EPOCH(ptr)     (set_leakp_traceid_mask(UNUSE_PREV_EPOCH_MASK, (ptr)->trace_id_addr()))
-#define LEAKP_UNUSE_METHOD_THIS_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
-#define LEAKP_UNUSE_METHOD_PREV_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
-
-#define ANY_USED(ptr)                   (((ptr)->trace_id() & ANY_USED_BITS) != 0)
-#define ANY_NOT_USED(ptr)               (!ANY_USED(ptr))
-
-#define UNUSE_METHOD_AND_CLASS_THIS_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
-#define LEAKP_UNUSE_METHOD_AND_CLASS_THIS_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_AND_CLASS_THIS_EPOCH_MASK, (kls)->trace_id_addr()))
-#define UNUSE_METHOD_AND_CLASS_PREV_EPOCH(kls) (set_traceid_mask(UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
-#define LEAKP_UNUSE_METHODS_AND_CLASS_PREV_EPOCH(kls) (set_leakp_traceid_mask(UNUSE_METHOD_AND_CLASS_PREV_EPOCH_MASK, (kls)->trace_id_addr()))
-
-#define METHOD_FLAG_USED_THIS_EPOCH(m)       ((m)->is_trace_flag_set((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit()))
-#define METHOD_FLAG_NOT_USED_THIS_EPOCH(m)   (!METHOD_FLAG_USED_THIS_EPOCH(m))
-#define SET_METHOD_FLAG_USED_THIS_EPOCH(m)   ((m)->set_trace_flag((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit()))
-#define METHOD_FLAG_USED_PREV_EPOCH(m)       ((m)->is_trace_flag_set((jbyte)JfrTraceIdEpoch::in_use_prev_epoch_bit()))
-#define METHOD_FLAG_NOT_USED_PREV_EPOCH(m)   (!METHOD_FLAG_USED_PREV_EPOCH(m))
-#define METHOD_FLAG_USED_ANY_EPOCH(m)        ((METHOD_FLAG_USED_THIS_EPOCH(m) || METHOD_FLAG_USED_PREV_EPOCH(m)) != 0)
-#define METHOD_FLAG_NOT_USED_ANY_EPOCH(m)    ((METHOD_FLAG_NOT_USED_THIS_EPOCH(m) && METHOD_FLAG_NOT_USED_PREV_EPOCH(m)) != 0)
-#define CLEAR_METHOD_FLAG_USED_THIS_EPOCH(m) (clear_bits_cas((jbyte)JfrTraceIdEpoch::in_use_this_epoch_bit(), (m)->trace_flags_addr()))
-#define CLEAR_METHOD_FLAG_USED_PREV_EPOCH(m) (clear_bits_cas((jbyte)JfrTraceIdEpoch::in_use_prev_epoch_bit(), (m)->trace_flags_addr()))
+// meta
+#define META_MASK                                 (~(SERIALIZED_META_BIT | TRANSIENT_META_BIT | LEAKP_META_BIT))
+#define SET_LEAKP(ptr)                            (TRACE_ID_META_TAG(ptr, LEAKP_META_BIT))
+#define IS_LEAKP(ptr)                             (TRACE_ID_PREDICATE(ptr, LEAKP_BIT))
+#define SET_TRANSIENT(ptr)                        (TRACE_ID_META_TAG(ptr, TRANSIENT_META_BIT))
+#define IS_SERIALIZED(ptr)                        (TRACE_ID_PREDICATE(ptr, SERIALIZED_BIT))
+#define IS_NOT_SERIALIZED(ptr)                    (!(IS_SERIALIZED(ptr)))
+#define SHOULD_TAG(ptr)                           (NOT_USED_THIS_EPOCH(ptr))
+#define SHOULD_TAG_KLASS_METHOD(ptr)              (METHOD_NOT_USED_THIS_EPOCH(ptr))
+#define SET_SERIALIZED(ptr)                       (TRACE_ID_META_TAG(ptr, SERIALIZED_META_BIT))
+#define CLEAR_SERIALIZED(ptr)                     (TRACE_ID_META_CLEAR(ptr, META_MASK))
+#define IS_METHOD_SERIALIZED(method)              (METHOD_FLAG_PREDICATE(method, SERIALIZED_BIT))
+#define IS_METHOD_LEAKP_USED(method)              (METHOD_FLAG_PREDICATE(method, LEAKP_BIT))
+#define METHOD_NOT_SERIALIZED(method)             (!(IS_METHOD_SERIALIZED(method)))
+#define SET_METHOD_LEAKP(method)                  (METHOD_META_TAG(method, LEAKP_META_BIT))
+#define SET_METHOD_SERIALIZED(method)             (METHOD_META_TAG(method, SERIALIZED_META_BIT))
+#define CLEAR_METHOD_SERIALIZED(method)           (METHOD_META_CLEAR(method, META_MASK))
+#define CLEAR_LEAKP(ptr)                          (TRACE_ID_META_CLEAR(ptr, (~(LEAKP_META_BIT))))
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
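The new static meta bits come in pairs: the *_META_BIT value is what gets OR-ed into the narrow meta byte (or a method's trace flags), while the META_SHIFT-ed *_BIT value is what whole-word predicates such as IS_LEAKP test. A compilable sketch of that pairing, assuming USED_BIT is 1 and a little-endian layout; the names and values are illustrative:

#include <cstdint>
#include <cstdio>

// Mirrors the LEAKP_META_BIT / META_SHIFT / LEAKP_BIT relationship; values are assumptions.
static const uint64_t kLeakpMetaBit = 1;                           // written into the meta byte
static const int      kMetaShift    = 8;
static const uint64_t kLeakpBit     = kLeakpMetaBit << kMetaShift; // tested against the full word

int main() {
  uint64_t traceid = 0;
  // SET_LEAKP-style tagging: OR the unshifted meta bit into the meta byte.
  uint8_t* meta_byte = reinterpret_cast<uint8_t*>(&traceid) + 1;   // little-endian meta byte
  *meta_byte |= (uint8_t)kLeakpMetaBit;
  // IS_LEAKP-style predicate: read the whole 64-bit word with the shifted bit.
  std::printf("is leakp: %s\n", (traceid & kLeakpBit) != 0 ? "yes" : "no");
  return 0;
}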
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -39,7 +39,7 @@
   _start_nanos(0),
   _previous_start_ticks(0),
   _previous_start_nanos(0),
-  _previous_checkpoint_offset(0) {}
+  _last_checkpoint_offset(0) {}
 
 JfrChunkState::~JfrChunkState() {
   reset();
@@ -50,15 +50,15 @@
     JfrCHeapObj::free(_path, strlen(_path) + 1);
     _path = NULL;
   }
-  set_previous_checkpoint_offset(0);
+  set_last_checkpoint_offset(0);
 }
 
-void JfrChunkState::set_previous_checkpoint_offset(int64_t offset) {
-  _previous_checkpoint_offset = offset;
+void JfrChunkState::set_last_checkpoint_offset(int64_t offset) {
+  _last_checkpoint_offset = offset;
 }
 
-int64_t JfrChunkState::previous_checkpoint_offset() const {
-  return _previous_checkpoint_offset;
+int64_t JfrChunkState::last_checkpoint_offset() const {
+  return _last_checkpoint_offset;
 }
 
 int64_t JfrChunkState::previous_start_ticks() const {
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkState.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkState.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -36,7 +36,7 @@
   int64_t _start_nanos;
   int64_t _previous_start_ticks;
   int64_t _previous_start_nanos;
-  int64_t _previous_checkpoint_offset;
+  int64_t _last_checkpoint_offset;
 
   void update_start_ticks();
   void update_start_nanos();
@@ -46,8 +46,8 @@
   JfrChunkState();
   ~JfrChunkState();
   void reset();
-  int64_t previous_checkpoint_offset() const;
-  void set_previous_checkpoint_offset(int64_t offset);
+  int64_t last_checkpoint_offset() const;
+  void set_last_checkpoint_offset(int64_t offset);
   int64_t previous_start_ticks() const;
   int64_t previous_start_nanos() const;
   int64_t last_chunk_duration() const;
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -86,7 +86,7 @@
   // Chunk size
   this->write_be_at_offset(size_written(), CHUNK_SIZE_OFFSET);
   // initial checkpoint event offset
-  this->write_be_at_offset(_chunkstate->previous_checkpoint_offset(), CHUNK_SIZE_OFFSET + (1 * FILEHEADER_SLOT_SIZE));
+  this->write_be_at_offset(_chunkstate->last_checkpoint_offset(), CHUNK_SIZE_OFFSET + (1 * FILEHEADER_SLOT_SIZE));
   // metadata event offset
   this->write_be_at_offset(metadata_offset, CHUNK_SIZE_OFFSET + (2 * FILEHEADER_SLOT_SIZE));
   // start of chunk in nanos since epoch
@@ -105,12 +105,12 @@
   return this->is_valid() ? this->current_offset() : 0;
 }
 
-int64_t JfrChunkWriter::previous_checkpoint_offset() const {
-  return _chunkstate->previous_checkpoint_offset();
+int64_t JfrChunkWriter::last_checkpoint_offset() const {
+  return _chunkstate->last_checkpoint_offset();
 }
 
-void JfrChunkWriter::set_previous_checkpoint_offset(int64_t offset) {
-  _chunkstate->set_previous_checkpoint_offset(offset);
+void JfrChunkWriter::set_last_checkpoint_offset(int64_t offset) {
+  _chunkstate->set_last_checkpoint_offset(offset);
 }
 
 void JfrChunkWriter::time_stamp_chunk_now() {
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -49,8 +49,8 @@
   JfrChunkWriter();
   bool initialize();
   int64_t size_written() const;
-  int64_t previous_checkpoint_offset() const;
-  void set_previous_checkpoint_offset(int64_t offset);
+  int64_t last_checkpoint_offset() const;
+  void set_last_checkpoint_offset(int64_t offset);
   void time_stamp_chunk_now();
 };
 
--- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -133,13 +133,13 @@
 };
 
 static int64_t write_checkpoint_event_prologue(JfrChunkWriter& cw, u8 type_id) {
-  const int64_t prev_cp_offset = cw.previous_checkpoint_offset();
-  const int64_t prev_cp_relative_offset = 0 == prev_cp_offset ? 0 : prev_cp_offset - cw.current_offset();
+  const int64_t last_cp_offset = cw.last_checkpoint_offset();
+  const int64_t delta_to_last_checkpoint = 0 == last_cp_offset ? 0 : last_cp_offset - cw.current_offset();
   cw.reserve(sizeof(u4));
   cw.write<u8>(EVENT_CHECKPOINT);
   cw.write(JfrTicks::now());
-  cw.write((int64_t)0);
-  cw.write(prev_cp_relative_offset); // write previous checkpoint offset delta
+  cw.write((int64_t)0); // duration
+  cw.write(delta_to_last_checkpoint);
   cw.write<bool>(false); // flushpoint
   cw.write((u4)1); // nof types in this checkpoint
   cw.write(type_id);
@@ -178,7 +178,7 @@
     _cw.write_padded_at_offset<u4>(number_of_elements, num_elements_offset);
     _cw.write_padded_at_offset<u4>((u4)_cw.current_offset() - current_cp_offset, current_cp_offset);
     // update writer with last checkpoint position
-    _cw.set_previous_checkpoint_offset(current_cp_offset);
+    _cw.set_last_checkpoint_offset(current_cp_offset);
     return true;
   }
 };
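Each checkpoint event records a backward delta to the checkpoint written before it, and the chunk state keeps the offset of the last one, so a reader can walk the whole checkpoint chain from the chunk header without scanning the chunk. A simplified model of that chaining; Chunk and write_checkpoint are hypothetical names, not the JfrChunkWriter API:

#include <cstdint>
#include <cstdio>
#include <vector>

struct Chunk {
  int64_t last_checkpoint_offset = 0;  // 0 means "no checkpoint written yet"
  int64_t current_offset = 64;         // pretend the chunk header occupies the start
  std::vector<int64_t> deltas;         // per-checkpoint backward deltas, for inspection
};

static void write_checkpoint(Chunk& c, int64_t event_size) {
  const int64_t cp_offset = c.current_offset;
  // Delta points backwards to the previous checkpoint (cf. write_checkpoint_event_prologue).
  const int64_t delta = c.last_checkpoint_offset == 0 ? 0
                                                      : c.last_checkpoint_offset - cp_offset;
  c.deltas.push_back(delta);
  c.current_offset += event_size;
  c.last_checkpoint_offset = cp_offset;  // cf. set_last_checkpoint_offset()
}

int main() {
  Chunk chunk;
  write_checkpoint(chunk, 100);  // first checkpoint: delta 0
  write_checkpoint(chunk, 50);   // delta -100
  write_checkpoint(chunk, 70);   // delta -50
  for (int64_t d : chunk.deltas) {
    std::printf("delta %lld\n", (long long)d);
  }
  return 0;
}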
@@ -317,19 +317,16 @@
     vm_error = true;
     prepare_for_vm_error_rotation();
   }
+  if (!_storage.control().to_disk()) {
+    in_memory_rotation();
+  } else if (vm_error) {
+    vm_error_rotation();
+  } else {
+    chunk_rotation();
+  }
   if (msgs & (MSGBIT(MSG_STOP))) {
     stop();
   }
-  // action determined by chunkwriter state
-  if (!_chunkwriter.is_valid()) {
-    in_memory_rotation();
-    return;
-  }
-  if (vm_error) {
-    vm_error_rotation();
-    return;
-  }
-  chunk_rotation();
 }
 
 void JfrRecorderService::prepare_for_vm_error_rotation() {
@@ -400,12 +397,6 @@
   WriteStackTraceCheckpoint write_stack_trace_checkpoint(chunkwriter, TYPE_STACKTRACE, write_stacktrace_repo);
   write_stack_trace_checkpoint.process();
 }
-
-static void write_object_sample_stacktrace(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repository) {
-  WriteObjectSampleStacktrace object_sample_stacktrace(sampler, stack_trace_repository);
-  object_sample_stacktrace.process();
-}
-
 static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
   WriteStringPool write_string_pool(string_pool);
   WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
@@ -440,9 +431,7 @@
   if (LeakProfiler::is_running()) {
     // Exclusive access to the object sampler instance.
     // The sampler is released (unlocked) later in post_safepoint_write.
-    ObjectSampler* const sampler = ObjectSampler::acquire();
-    assert(sampler != NULL, "invariant");
-    write_object_sample_stacktrace(sampler, _stack_trace_repository);
+    ObjectSampleCheckpoint::on_rotation(ObjectSampler::acquire(), _stack_trace_repository);
   }
   _storage.write();
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/vframe.inline.hpp"
+
+static void copy_frames(JfrStackFrame** lhs_frames, u4 length, const JfrStackFrame* rhs_frames) {
+  assert(lhs_frames != NULL, "invariant");
+  assert(rhs_frames != NULL, "invariant");
+  if (length > 0) {
+    *lhs_frames = NEW_C_HEAP_ARRAY(JfrStackFrame, length, mtTracing);
+    memcpy(*lhs_frames, rhs_frames, length * sizeof(JfrStackFrame));
+  }
+}
+
+JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, const Method* method) :
+  _method(method), _methodid(id), _line(0), _bci(bci), _type(type) {}
+
+JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno) :
+  _method(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
+
+JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
+  _next(NULL),
+  _frames(frames),
+  _id(0),
+  _hash(0),
+  _nr_of_frames(0),
+  _max_frames(max_frames),
+  _frames_ownership(false),
+  _reached_root(false),
+  _lineno(false),
+  _written(false) {}
+
+JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next) :
+  _next(next),
+  _frames(NULL),
+  _id(id),
+  _hash(trace._hash),
+  _nr_of_frames(trace._nr_of_frames),
+  _max_frames(trace._max_frames),
+  _frames_ownership(true),
+  _reached_root(trace._reached_root),
+  _lineno(trace._lineno),
+  _written(false) {
+  copy_frames(&_frames, trace._nr_of_frames, trace._frames);
+}
+
+JfrStackTrace::~JfrStackTrace() {
+  if (_frames_ownership) {
+    FREE_C_HEAP_ARRAY(JfrStackFrame, _frames);
+  }
+}
+
+template <typename Writer>
+static void write_stacktrace(Writer& w, traceid id, bool reached_root, u4 nr_of_frames, const JfrStackFrame* frames) {
+  w.write((u8)id);
+  w.write((u1)!reached_root);
+  w.write(nr_of_frames);
+  for (u4 i = 0; i < nr_of_frames; ++i) {
+    frames[i].write(w);
+  }
+}
+
+void JfrStackTrace::write(JfrChunkWriter& sw) const {
+  assert(!_written, "invariant");
+  write_stacktrace(sw, _id, _reached_root, _nr_of_frames, _frames);
+  _written = true;
+}
+
+void JfrStackTrace::write(JfrCheckpointWriter& cpw) const {
+  write_stacktrace(cpw, _id, _reached_root, _nr_of_frames, _frames);
+}
+
+bool JfrStackFrame::equals(const JfrStackFrame& rhs) const {
+  return _methodid == rhs._methodid && _bci == rhs._bci && _type == rhs._type;
+}
+
+bool JfrStackTrace::equals(const JfrStackTrace& rhs) const {
+  if (_reached_root != rhs._reached_root || _nr_of_frames != rhs._nr_of_frames || _hash != rhs._hash) {
+    return false;
+  }
+  for (u4 i = 0; i < _nr_of_frames; ++i) {
+    if (!_frames[i].equals(rhs._frames[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+template <typename Writer>
+static void write_frame(Writer& w, traceid methodid, int line, int bci, u1 type) {
+  w.write((u8)methodid);
+  w.write((u4)line);
+  w.write((u4)bci);
+  w.write((u8)type);
+}
+
+void JfrStackFrame::write(JfrChunkWriter& cw) const {
+  write_frame(cw, _methodid, _line, _bci, _type);
+}
+
+void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
+  write_frame(cpw, _methodid, _line, _bci, _type);
+}
+
+class vframeStreamSamples : public vframeStreamCommon {
+ public:
+  // constructor that starts with sender of frame fr (top_frame)
+  vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
+    _stop_at_java_call_stub = stop_at_java_call_stub;
+    _frame = fr;
+
+    // We must always have a valid frame to start filling
+    bool filled_in = fill_from_frame();
+    assert(filled_in, "invariant");
+  }
+  void samples_next();
+  void stop() {}
+};
+
+// Solaris SPARC Compiler1 needs an additional check on the grandparent
+// of the top_frame when the parent of the top_frame is interpreted and
+// the grandparent is compiled. However, in this method we do not know
+// the relationship of the current _frame relative to the top_frame so
+// we implement a broader sanity check. When the previous callee is
+// interpreted and the current sender is compiled, we verify that the
+// current sender is also walkable. If it is not walkable, then we mark
+// the current vframeStream as at the end.
+void vframeStreamSamples::samples_next() {
+  // handle frames with inlining
+  if (_mode == compiled_mode &&
+    vframeStreamCommon::fill_in_compiled_inlined_sender()) {
+    return;
+  }
+
+  // handle general case
+  u4 loop_count = 0;
+  u4 loop_max = MAX_STACK_DEPTH * 2;
+  do {
+    loop_count++;
+    // By the time we get here we should never see an unsafe frame, but better safe than segv'd
+    if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) {
+      _mode = at_end_mode;
+      return;
+    }
+    _frame = _frame.sender(&_reg_map);
+  } while (!fill_from_frame());
+}
+
+bool JfrStackTrace::record_thread(JavaThread& thread, frame& frame) {
+  vframeStreamSamples st(&thread, frame, false);
+  u4 count = 0;
+  _reached_root = true;
+
+  while (!st.at_end()) {
+    if (count >= _max_frames) {
+      _reached_root = false;
+      break;
+    }
+    const Method* method = st.method();
+    if (!Method::is_valid_method(method)) {
+      // we throw away everything we've gathered in this sample since
+      // none of it is safe
+      return false;
+    }
+    const traceid mid = JfrTraceId::use(method);
+    int type = st.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
+    int bci = 0;
+    if (method->is_native()) {
+      type = JfrStackFrame::FRAME_NATIVE;
+    } else {
+      bci = st.bci();
+    }
+    const int lineno = method->line_number_from_bci(bci);
+    // Can we determine if it's inlined?
+    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
+    _frames[count] = JfrStackFrame(mid, bci, type, method);
+    st.samples_next();
+    count++;
+  }
+
+  _lineno = true;
+  _nr_of_frames = count;
+  return true;
+}
+
+void JfrStackFrame::resolve_lineno() const {
+  assert(_method, "no method pointer");
+  assert(_line == 0, "already have line number");
+  _line = _method->line_number_from_bci(_bci);
+}
+
+void JfrStackTrace::resolve_linenos() const {
+  for (unsigned int i = 0; i < _nr_of_frames; i++) {
+    _frames[i].resolve_lineno();
+  }
+  _lineno = true;
+}
+
+bool JfrStackTrace::record_safe(JavaThread* thread, int skip) {
+  assert(thread == Thread::current(), "Thread stack needs to be walkable");
+  vframeStream vfs(thread);
+  u4 count = 0;
+  _reached_root = true;
+  for (int i = 0; i < skip; i++) {
+    if (vfs.at_end()) {
+      break;
+    }
+    vfs.next();
+  }
+
+  while (!vfs.at_end()) {
+    if (count >= _max_frames) {
+      _reached_root = false;
+      break;
+    }
+    const Method* method = vfs.method();
+    const traceid mid = JfrTraceId::use(method);
+    int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
+    int bci = 0;
+    if (method->is_native()) {
+      type = JfrStackFrame::FRAME_NATIVE;
+    }
+    else {
+      bci = vfs.bci();
+    }
+    // Can we determine if it's inlined?
+    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
+    _frames[count] = JfrStackFrame(mid, bci, type, method);
+    vfs.next();
+    count++;
+  }
+
+  _nr_of_frames = count;
+  return true;
+}
+
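record_thread() and record_safe() fold every frame's method id, bci and frame type into a rolling hash, so the repository can pre-filter lookups before falling back to the exact equals() comparison. A standalone sketch of that per-frame hash; the Frame struct and hash_frames name are illustrative:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Frame {
  uint64_t method_id;
  int bci;
  int type;
};

// Same mixing step as in JfrStackTrace::record_thread()/record_safe().
static unsigned int hash_frames(const Frame* frames, unsigned int count) {
  unsigned int h = 0;
  for (unsigned int i = 0; i < count; ++i) {
    h = (h << 2) + (unsigned int)(((size_t)frames[i].method_id >> 2) +
                                  (frames[i].bci << 4) + frames[i].type);
  }
  return h;
}

int main() {
  const Frame frames[2] = { {0x1234, 7, 1}, {0x5678, 0, 3} };
  std::printf("stack trace hash: %u\n", hash_frames(frames, 2));
  return 0;
}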
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
+#define SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+
+class frame;
+class JavaThread;
+class JfrCheckpointWriter;
+class JfrChunkWriter;
+class Method;
+
+class JfrStackFrame {
+  friend class ObjectSampleCheckpoint;
+ private:
+  const Method* _method;
+  traceid _methodid;
+  mutable int _line;
+  int _bci;
+  u1 _type;
+
+ public:
+  JfrStackFrame(const traceid& id, int bci, int type, const Method* method);
+  JfrStackFrame(const traceid& id, int bci, int type, int lineno);
+
+  bool equals(const JfrStackFrame& rhs) const;
+  void write(JfrChunkWriter& cw) const;
+  void write(JfrCheckpointWriter& cpw) const;
+  void resolve_lineno() const;
+
+  enum {
+    FRAME_INTERPRETER = 0,
+    FRAME_JIT,
+    FRAME_INLINE,
+    FRAME_NATIVE,
+    NUM_FRAME_TYPES
+  };
+};
+
+class JfrStackTrace : public JfrCHeapObj {
+  friend class JfrNativeSamplerCallback;
+  friend class JfrStackTraceRepository;
+  friend class ObjectSampleCheckpoint;
+  friend class ObjectSampler;
+  friend class OSThreadSampler;
+  friend class StackTraceResolver;
+ private:
+  const JfrStackTrace* _next;
+  JfrStackFrame* _frames;
+  traceid _id;
+  unsigned int _hash;
+  u4 _nr_of_frames;
+  u4 _max_frames;
+  bool _frames_ownership;
+  bool _reached_root;
+  mutable bool _lineno;
+  mutable bool _written;
+
+  const JfrStackTrace* next() const { return _next; }
+
+  bool should_write() const { return !_written; }
+  void write(JfrChunkWriter& cw) const;
+  void write(JfrCheckpointWriter& cpw) const;
+  bool equals(const JfrStackTrace& rhs) const;
+
+  void set_id(traceid id) { _id = id; }
+  void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
+  void set_hash(unsigned int hash) { _hash = hash; }
+  void set_reached_root(bool reached_root) { _reached_root = reached_root; }
+  void resolve_linenos() const;
+
+  bool record_thread(JavaThread& thread, frame& frame);
+  bool record_safe(JavaThread* thread, int skip);
+
+  bool have_lineno() const { return _lineno; }
+  bool full_stacktrace() const { return _reached_root; }
+
+  JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next);
+  JfrStackTrace(JfrStackFrame* frames, u4 max_frames);
+  ~JfrStackTrace();
+
+ public:
+  unsigned int hash() const { return _hash; }
+  traceid id() const { return _id; }
+};
+
+#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -24,66 +24,18 @@
 
 #include "precompiled.hpp"
 #include "jfr/metadata/jfrSerializer.hpp"
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
-#include "memory/allocation.inline.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/task.hpp"
-#include "runtime/vframe.inline.hpp"
-
-class vframeStreamSamples : public vframeStreamCommon {
- public:
-  // constructor that starts with sender of frame fr (top_frame)
-  vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub);
-  void samples_next();
-  void stop() {}
-};
-
-vframeStreamSamples::vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
-  _stop_at_java_call_stub = stop_at_java_call_stub;
-  _frame = fr;
-
-  // We must always have a valid frame to start filling
-  bool filled_in = fill_from_frame();
-  assert(filled_in, "invariant");
-}
-
-// Solaris SPARC Compiler1 needs an additional check on the grandparent
-// of the top_frame when the parent of the top_frame is interpreted and
-// the grandparent is compiled. However, in this method we do not know
-// the relationship of the current _frame relative to the top_frame so
-// we implement a more broad sanity check. When the previous callee is
-// interpreted and the current sender is compiled, we verify that the
-// current sender is also walkable. If it is not walkable, then we mark
-// the current vframeStream as at the end.
-void vframeStreamSamples::samples_next() {
-  // handle frames with inlining
-  if (_mode == compiled_mode &&
-      vframeStreamCommon::fill_in_compiled_inlined_sender()) {
-    return;
-  }
-
-  // handle general case
-  u4 loop_count = 0;
-  u4 loop_max = MAX_STACK_DEPTH * 2;
-  do {
-    loop_count++;
-    // By the time we get here we should never see unsafe but better safe then segv'd
-    if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) {
-      _mode = at_end_mode;
-      return;
-    }
-    _frame = _frame.sender(&_reg_map);
-  } while (!fill_from_frame());
-}
 
 static JfrStackTraceRepository* _instance = NULL;
 
+JfrStackTraceRepository::JfrStackTraceRepository() : _next_id(0), _entries(0) {
+  memset(_table, 0, sizeof(_table));
+}
+
 JfrStackTraceRepository& JfrStackTraceRepository::instance() {
   return *_instance;
 }
@@ -94,15 +46,6 @@
   return _instance;
 }
 
-void JfrStackTraceRepository::destroy() {
-  assert(_instance != NULL, "invarinat");
-  delete _instance;
-  _instance = NULL;
-}
-
-JfrStackTraceRepository::JfrStackTraceRepository() : _next_id(0), _entries(0) {
-  memset(_table, 0, sizeof(_table));
-}
 class JfrFrameType : public JfrSerializer {
  public:
   void serialize(JfrCheckpointWriter& writer) {
@@ -122,100 +65,10 @@
   return JfrSerializer::register_serializer(TYPE_FRAMETYPE, false, true, new JfrFrameType());
 }
 
-size_t JfrStackTraceRepository::clear() {
-  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
-  if (_entries == 0) {
-    return 0;
-  }
-  for (u4 i = 0; i < TABLE_SIZE; ++i) {
-    JfrStackTraceRepository::StackTrace* stacktrace = _table[i];
-    while (stacktrace != NULL) {
-      JfrStackTraceRepository::StackTrace* next = stacktrace->next();
-      delete stacktrace;
-      stacktrace = next;
-    }
-  }
-  memset(_table, 0, sizeof(_table));
-  const size_t processed = _entries;
-  _entries = 0;
-  return processed;
-}
-
-traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
-  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
-  const size_t index = stacktrace._hash % TABLE_SIZE;
-  const StackTrace* table_entry = _table[index];
-
-  while (table_entry != NULL) {
-    if (table_entry->equals(stacktrace)) {
-      return table_entry->id();
-    }
-    table_entry = table_entry->next();
-  }
-
-  if (!stacktrace.have_lineno()) {
-    return 0;
-  }
-
-  traceid id = ++_next_id;
-  _table[index] = new StackTrace(id, stacktrace, _table[index]);
-  ++_entries;
-  return id;
-}
-
-traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
-  traceid tid = instance().add_trace(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = instance().add_trace(stacktrace);
-  }
-  assert(tid != 0, "invariant");
-  return tid;
-}
-
-traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
-  assert(thread == Thread::current(), "invariant");
-  JfrThreadLocal* const tl = thread->jfr_thread_local();
-  assert(tl != NULL, "invariant");
-  if (tl->has_cached_stack_trace()) {
-    return tl->cached_stack_trace_id();
-  }
-  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
-    return 0;
-  }
-  JfrStackFrame* frames = tl->stackframes();
-  if (frames == NULL) {
-    // pending oom
-    return 0;
-  }
-  assert(frames != NULL, "invariant");
-  assert(tl->stackframes() == frames, "invariant");
-  return instance().record_for((JavaThread*)thread, skip,frames, tl->stackdepth());
-}
-
-traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
-  JfrStackTrace stacktrace(frames, max_frames);
-  return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
-}
-
-traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
-  assert(stacktrace != NULL, "invariant");
-  assert(thread != NULL, "invariant");
-  assert(stacktrace->hash() != 0, "invariant");
-  return add(*stacktrace);
-}
-
-bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip) {
-  assert(thread == Thread::current(), "invariant");
-  assert(stacktrace != NULL, "invariant");
-  JfrThreadLocal* const tl = thread->jfr_thread_local();
-  assert(tl != NULL, "invariant");
-  const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
-  if (cached_stacktrace_hash != 0) {
-    stacktrace->set_hash(cached_stacktrace_hash);
-    return true;
-  }
-  return stacktrace->record_safe(thread, skip, true);
+void JfrStackTraceRepository::destroy() {
+  assert(_instance != NULL, "invariant");
+  delete _instance;
+  _instance = NULL;
 }
 
 size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
@@ -223,9 +76,9 @@
   assert(_entries > 0, "invariant");
   int count = 0;
   for (u4 i = 0; i < TABLE_SIZE; ++i) {
-    JfrStackTraceRepository::StackTrace* stacktrace = _table[i];
+    JfrStackTrace* stacktrace = _table[i];
     while (stacktrace != NULL) {
-      JfrStackTraceRepository::StackTrace* next = stacktrace->next();
+      JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
       if (stacktrace->should_write()) {
         stacktrace->write(sw);
         ++count;
@@ -249,7 +102,7 @@
 
 traceid JfrStackTraceRepository::write(JfrCheckpointWriter& writer, traceid id, unsigned int hash) {
   assert(JfrStacktrace_lock->owned_by_self(), "invariant");
-  const StackTrace* const trace = resolve_entry(hash, id);
+  const JfrStackTrace* const trace = lookup(hash, id);
   assert(trace != NULL, "invariant");
   assert(trace->hash() == hash, "invariant");
   assert(trace->id() == id, "invariant");
@@ -257,82 +110,105 @@
   return id;
 }
 
-JfrStackTraceRepository::StackTrace::StackTrace(traceid id, const JfrStackTrace& trace, JfrStackTraceRepository::StackTrace* next) :
-  _next(next),
-  _frames(NULL),
-  _id(id),
-  _nr_of_frames(trace._nr_of_frames),
-  _hash(trace._hash),
-  _reached_root(trace._reached_root),
-  _written(false) {
-  if (_nr_of_frames > 0) {
-    _frames = NEW_C_HEAP_ARRAY(JfrStackFrame, _nr_of_frames, mtTracing);
-    memcpy(_frames, trace._frames, _nr_of_frames * sizeof(JfrStackFrame));
+void JfrStackTraceRepository::write_metadata(JfrCheckpointWriter& writer) {
+  JfrFrameType fct;
+  writer.write_type(TYPE_FRAMETYPE);
+  fct.serialize(writer);
+}
+
+size_t JfrStackTraceRepository::clear() {
+  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+  if (_entries == 0) {
+    return 0;
+  }
+  for (u4 i = 0; i < TABLE_SIZE; ++i) {
+    JfrStackTrace* stacktrace = _table[i];
+    while (stacktrace != NULL) {
+      JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
+      delete stacktrace;
+      stacktrace = next;
+    }
+  }
+  memset(_table, 0, sizeof(_table));
+  const size_t processed = _entries;
+  _entries = 0;
+  return processed;
+}
+
+traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
+  assert(thread == Thread::current(), "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (tl->has_cached_stack_trace()) {
+    return tl->cached_stack_trace_id();
+  }
+  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
+    return 0;
+  }
+  JfrStackFrame* frames = tl->stackframes();
+  if (frames == NULL) {
+    // pending oom
+    return 0;
+  }
+  assert(frames != NULL, "invariant");
+  assert(tl->stackframes() == frames, "invariant");
+  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth());
+}
+
+traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
+  JfrStackTrace stacktrace(frames, max_frames);
+  return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
+}
+
+traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
+  traceid tid = instance().add_trace(stacktrace);
+  if (tid == 0) {
+    stacktrace.resolve_linenos();
+    tid = instance().add_trace(stacktrace);
+  }
+  assert(tid != 0, "invariant");
+  return tid;
+}
+
+void JfrStackTraceRepository::record_and_cache(JavaThread* thread, int skip /* 0 */) {
+  assert(thread != NULL, "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  assert(!tl->has_cached_stack_trace(), "invariant");
+  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
+  stacktrace.record_safe(thread, skip);
+  const unsigned int hash = stacktrace.hash();
+  if (hash != 0) {
+    tl->set_cached_stack_trace_id(instance().add(stacktrace), hash);
   }
 }
 
-JfrStackTraceRepository::StackTrace::~StackTrace() {
-  FREE_C_HEAP_ARRAY(JfrStackFrame, _frames);
-}
-
-bool JfrStackTraceRepository::StackTrace::equals(const JfrStackTrace& rhs) const {
-  if (_reached_root != rhs._reached_root || _nr_of_frames != rhs._nr_of_frames || _hash != rhs._hash) {
-    return false;
-  }
-  for (u4 i = 0; i < _nr_of_frames; ++i) {
-    if (!_frames[i].equals(rhs._frames[i])) {
-      return false;
-    }
-  }
-  return true;
-}
-
-template <typename Writer>
-static void write_stacktrace(Writer& w, traceid id, bool reached_root, u4 nr_of_frames, const JfrStackFrame* frames) {
-  w.write((u8)id);
-  w.write((u1)!reached_root);
-  w.write(nr_of_frames);
-  for (u4 i = 0; i < nr_of_frames; ++i) {
-    frames[i].write(w);
-  }
-}
+traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
+  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+  const size_t index = stacktrace._hash % TABLE_SIZE;
+  const JfrStackTrace* table_entry = _table[index];
 
-void JfrStackTraceRepository::StackTrace::write(JfrChunkWriter& sw) const {
-  assert(!_written, "invariant");
-  write_stacktrace(sw, _id, _reached_root, _nr_of_frames, _frames);
-  _written = true;
-}
-
-void JfrStackTraceRepository::StackTrace::write(JfrCheckpointWriter& cpw) const {
-  write_stacktrace(cpw, _id, _reached_root, _nr_of_frames, _frames);
-}
-
-// JfrStackFrame
+  while (table_entry != NULL) {
+    if (table_entry->equals(stacktrace)) {
+      return table_entry->id();
+    }
+    table_entry = table_entry->next();
+  }
 
-bool JfrStackFrame::equals(const JfrStackFrame& rhs) const {
-  return _methodid == rhs._methodid && _bci == rhs._bci && _type == rhs._type;
-}
+  if (!stacktrace.have_lineno()) {
+    return 0;
+  }
 
-template <typename Writer>
-static void write_frame(Writer& w, traceid methodid, int line, int bci, u1 type) {
-  w.write((u8)methodid);
-  w.write((u4)line);
-  w.write((u4)bci);
-  w.write((u8)type);
-}
-
-void JfrStackFrame::write(JfrChunkWriter& cw) const {
-  write_frame(cw, _methodid, _line, _bci, _type);
-}
-
-void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
-  write_frame(cpw, _methodid, _line, _bci, _type);
+  traceid id = ++_next_id;
+  _table[index] = new JfrStackTrace(id, stacktrace, _table[index]);
+  ++_entries;
+  return id;
 }
 
 // invariant is that the entry to be resolved actually exists in the table
-const JfrStackTraceRepository::StackTrace* JfrStackTraceRepository::resolve_entry(unsigned int hash, traceid id) const {
+const JfrStackTrace* JfrStackTraceRepository::lookup(unsigned int hash, traceid id) const {
   const size_t index = (hash % TABLE_SIZE);
-  const StackTrace* trace = _table[index];
+  const JfrStackTrace* trace = _table[index];
   while (trace != NULL && trace->id() != id) {
     trace = trace->next();
   }
@@ -341,102 +217,3 @@
   assert(trace->id() == id, "invariant");
   return trace;
 }
-
-void JfrStackFrame::resolve_lineno() const {
-  assert(_method, "no method pointer");
-  assert(_line == 0, "already have linenumber");
-  _line = _method->line_number_from_bci(_bci);
-  _method = NULL;
-}
-
-void JfrStackTrace::set_frame(u4 frame_pos, JfrStackFrame& frame) {
-  assert(frame_pos < _max_frames, "illegal frame_pos");
-  _frames[frame_pos] = frame;
-}
-
-void JfrStackTrace::resolve_linenos() const {
-  for(unsigned int i = 0; i < _nr_of_frames; i++) {
-    _frames[i].resolve_lineno();
-  }
-  _lineno = true;
-}
-
-bool JfrStackTrace::record_safe(JavaThread* thread, int skip, bool leakp /* false */) {
-  assert(thread == Thread::current(), "Thread stack needs to be walkable");
-  vframeStream vfs(thread);
-  u4 count = 0;
-  _reached_root = true;
-  for(int i = 0; i < skip; i++) {
-    if (vfs.at_end()) {
-      break;
-    }
-    vfs.next();
-  }
-
-  while (!vfs.at_end()) {
-    if (count >= _max_frames) {
-      _reached_root = false;
-      break;
-    }
-    const Method* method = vfs.method();
-    const traceid mid = JfrTraceId::use(method, leakp);
-    int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
-    int bci = 0;
-    if (method->is_native()) {
-      type = JfrStackFrame::FRAME_NATIVE;
-    } else {
-      bci = vfs.bci();
-    }
-    // Can we determine if it's inlined?
-    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
-    _frames[count] = JfrStackFrame(mid, bci, type, method);
-    vfs.next();
-    count++;
-  }
-
-  _nr_of_frames = count;
-  return true;
-}
-
-bool JfrStackTrace::record_thread(JavaThread& thread, frame& frame) {
-  vframeStreamSamples st(&thread, frame, false);
-  u4 count = 0;
-  _reached_root = true;
-
-  while (!st.at_end()) {
-    if (count >= _max_frames) {
-      _reached_root = false;
-      break;
-    }
-    const Method* method = st.method();
-    if (!Method::is_valid_method(method)) {
-      // we throw away everything we've gathered in this sample since
-      // none of it is safe
-      return false;
-    }
-    const traceid mid = JfrTraceId::use(method);
-    int type = st.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
-    int bci = 0;
-    if (method->is_native()) {
-      type = JfrStackFrame::FRAME_NATIVE;
-    } else {
-      bci = st.bci();
-    }
-    const int lineno = method->line_number_from_bci(bci);
-    // Can we determine if it's inlined?
-    _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
-    _frames[count] = JfrStackFrame(mid, bci, type, lineno);
-    st.samples_next();
-    count++;
-  }
-
-  _lineno = true;
-  _nr_of_frames = count;
-  return true;
-}
-
-void JfrStackTraceRepository::write_metadata(JfrCheckpointWriter& writer) {
-  JfrFrameType fct;
-  writer.write_type(TYPE_FRAMETYPE);
-  fct.serialize(writer);
-}
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -25,133 +25,49 @@
 #ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
 #define SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
 
+#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
 #include "jfr/utilities/jfrAllocation.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 
-class frame;
 class JavaThread;
 class JfrCheckpointWriter;
 class JfrChunkWriter;
-class Method;
-
-class JfrStackFrame {
- private:
-  mutable const Method* _method;
-  traceid _methodid;
-  mutable int _line;
-  int _bci;
-  u1 _type;
-
- public:
-  enum {
-    FRAME_INTERPRETER = 0,
-    FRAME_JIT,
-    FRAME_INLINE,
-    FRAME_NATIVE,
-    NUM_FRAME_TYPES
-  };
-
-  JfrStackFrame(const traceid& id, int bci, int type, const Method* method) :
-    _method(method), _methodid(id), _line(0), _bci(bci), _type(type) {}
-  JfrStackFrame(const traceid& id, int bci, int type, int lineno) :
-    _method(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
-  bool equals(const JfrStackFrame& rhs) const;
-  void write(JfrChunkWriter& cw) const;
-  void write(JfrCheckpointWriter& cpw) const;
-  void resolve_lineno() const;
-};
-
-class JfrStackTrace : public StackObj {
-  friend class JfrStackTraceRepository;
- private:
-  JfrStackFrame* _frames;
-  traceid _id;
-  u4 _nr_of_frames;
-  unsigned int _hash;
-  const u4 _max_frames;
-  bool _reached_root;
-  mutable bool _lineno;
-
- public:
-  JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
-                                                        _id(0),
-                                                        _nr_of_frames(0),
-                                                        _hash(0),
-                                                        _max_frames(max_frames),
-                                                        _reached_root(false),
-                                                        _lineno(false) {}
-  bool record_thread(JavaThread& thread, frame& frame);
-  bool record_safe(JavaThread* thread, int skip, bool leakp = false);
-  void resolve_linenos() const;
-  void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
-  void set_hash(unsigned int hash) { _hash = hash; }
-  unsigned int hash() const { return _hash; }
-  void set_frame(u4 frame_pos, JfrStackFrame& frame);
-  void set_reached_root(bool reached_root) { _reached_root = reached_root; }
-  bool full_stacktrace() const { return _reached_root; }
-  bool have_lineno() const { return _lineno; }
-};
 
 class JfrStackTraceRepository : public JfrCHeapObj {
   friend class JfrRecorder;
   friend class JfrRecorderService;
+  friend class JfrThreadSampleClosure;
+  friend class ObjectSampleCheckpoint;
   friend class ObjectSampler;
-  friend class WriteObjectSampleStacktrace;
-
-  class StackTrace : public JfrCHeapObj {
-    friend class JfrStackTrace;
-    friend class JfrStackTraceRepository;
-   private:
-    StackTrace* _next;
-    JfrStackFrame* _frames;
-    const traceid _id;
-    u4 _nr_of_frames;
-    unsigned int _hash;
-    bool _reached_root;
-    mutable bool _written;
-
-    unsigned int hash() const { return _hash; }
-    bool should_write() const { return !_written; }
-
-   public:
-    StackTrace(traceid id, const JfrStackTrace& trace, StackTrace* next);
-    ~StackTrace();
-    traceid id() const { return _id; }
-    StackTrace* next() const { return _next; }
-    void write(JfrChunkWriter& cw) const;
-    void write(JfrCheckpointWriter& cpw) const;
-    bool equals(const JfrStackTrace& rhs) const;
-  };
+  friend class StackTraceBlobInstaller;
+  friend class WriteStackTraceRepository;
 
  private:
   static const u4 TABLE_SIZE = 2053;
-  StackTrace* _table[TABLE_SIZE];
+  JfrStackTrace* _table[TABLE_SIZE];
   traceid _next_id;
   u4 _entries;
 
-  traceid add_trace(const JfrStackTrace& stacktrace);
-  static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
-
-  size_t write_impl(JfrChunkWriter& cw, bool clear);
-  const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
-  static void write_metadata(JfrCheckpointWriter& cpw);
-
-  static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip);
-
   JfrStackTraceRepository();
+  static JfrStackTraceRepository& instance();
   static JfrStackTraceRepository* create();
   bool initialize();
   static void destroy();
 
-  static JfrStackTraceRepository& instance();
-
- public:
-  static traceid add(const JfrStackTrace& stacktrace);
-  static traceid record(Thread* thread, int skip = 0);
+  size_t write_impl(JfrChunkWriter& cw, bool clear);
+  static void write_metadata(JfrCheckpointWriter& cpw);
   traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
   size_t write(JfrChunkWriter& cw, bool clear);
   size_t clear();
+
+  traceid add_trace(const JfrStackTrace& stacktrace);
+  static traceid add(const JfrStackTrace& stacktrace);
+  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
+  const JfrStackTrace* lookup(unsigned int hash, traceid id) const;
+
+ public:
+  static traceid record(Thread* thread, int skip = 0);
+  static void record_and_cache(JavaThread* thread, int skip = 0);
 };
 
 #endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
--- a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -27,10 +27,6 @@
 #include "jfr/utilities/jfrAllocation.hpp"
 #include "jfr/utilities/jfrDoublyLinkedList.hpp"
 #include "jfr/utilities/jfrIterator.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
-#include "runtime/os.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
 
 template <typename T, template <typename> class RetrievalType, typename Callback>
 class JfrMemorySpace : public JfrCHeapObj {
@@ -107,62 +103,4 @@
   debug_only(bool in_free_list(const Type* t) const { return _free.in_list(t); })
 };
 
-// allocations are even multiples of the mspace min size
-inline u8 align_allocation_size(u8 requested_size, size_t min_elem_size) {
-  assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
-  u8 alloc_size_bytes = min_elem_size;
-  while (requested_size > alloc_size_bytes) {
-    alloc_size_bytes <<= 1;
-  }
-  assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
-  return alloc_size_bytes;
-}
-
-template <typename T, template <typename> class RetrievalType, typename Callback>
-T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) {
-  const u8 aligned_size_bytes = align_allocation_size(size, _min_elem_size);
-  void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T));
-  if (allocation == NULL) {
-    return NULL;
-  }
-  T* const t = new (allocation) T;
-  assert(t != NULL, "invariant");
-  if (!t->initialize(sizeof(T), aligned_size_bytes)) {
-    JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T));
-    return NULL;
-  }
-  return t;
-}
-
-template <typename T, template <typename> class RetrievalType, typename Callback>
-void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) {
-  assert(t != NULL, "invariant");
-  assert(!_free.in_list(t), "invariant");
-  assert(!_full.in_list(t), "invariant");
-  assert(t != NULL, "invariant");
-  JfrCHeapObj::free(t, t->total_size());
-}
-
-template <typename Mspace>
-class MspaceLock {
- private:
-  Mspace* _mspace;
- public:
-  MspaceLock(Mspace* mspace) : _mspace(mspace) { _mspace->lock(); }
-  ~MspaceLock() { _mspace->unlock(); }
-};
-
-template <typename Mspace>
-class ReleaseOp : public StackObj {
- private:
-  Mspace* _mspace;
-  Thread* _thread;
-  bool _release_full;
- public:
-  typedef typename Mspace::Type Type;
-  ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) : _mspace(mspace), _thread(thread), _release_full(release_full) {}
-  bool process(Type* t);
-  size_t processed() const { return 0; }
-};
-
 #endif // SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP
--- a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -26,6 +26,7 @@
 #define SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP
 
 #include "jfr/recorder/storage/jfrMemorySpace.hpp"
+#include "runtime/os.hpp"
 
 template <typename T, template <typename> class RetrievalType, typename Callback>
 JfrMemorySpace<T, RetrievalType, Callback>::
@@ -69,6 +70,42 @@
   return true;
 }
 
+// allocations are power-of-two multiples of the mspace min size
+static inline size_t align_allocation_size(size_t requested_size, size_t min_elem_size) {
+  assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
+  u8 alloc_size_bytes = min_elem_size;
+  while (requested_size > alloc_size_bytes) {
+    alloc_size_bytes <<= 1;
+  }
+  assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
+  return (size_t)alloc_size_bytes;
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+inline T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) {
+  const size_t aligned_size_bytes = align_allocation_size(size, _min_elem_size);
+  void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T));
+  if (allocation == NULL) {
+    return NULL;
+  }
+  T* const t = new (allocation) T;
+  assert(t != NULL, "invariant");
+  if (!t->initialize(sizeof(T), aligned_size_bytes)) {
+    JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T));
+    return NULL;
+  }
+  return t;
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+inline void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) {
+  assert(t != NULL, "invariant");
+  assert(!_free.in_list(t), "invariant");
+  assert(!_full.in_list(t), "invariant");
+  assert(t != NULL, "invariant");
+  JfrCHeapObj::free(t, t->total_size());
+}
+
 template <typename T, template <typename> class RetrievalType, typename Callback>
 inline void JfrMemorySpace<T, RetrievalType, Callback>::release_full(T* t) {
   assert(is_locked(), "invariant");
@@ -122,6 +159,15 @@
   }
 }
 
+template <typename Mspace, typename Callback>
+static inline Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, Callback* cb) {
+  Mspace* const mspace = new Mspace(buffer_size, limit, cache_count, cb);
+  if (mspace != NULL) {
+    mspace->initialize();
+  }
+  return mspace;
+}
+
 template <typename Mspace>
 inline size_t size_adjustment(size_t size, Mspace* mspace) {
   assert(mspace != NULL, "invariant");
@@ -174,6 +220,15 @@
 }
 
 template <typename Mspace>
+class MspaceLock {
+ private:
+  Mspace* _mspace;
+ public:
+  MspaceLock(Mspace* mspace) : _mspace(mspace) { _mspace->lock(); }
+  ~MspaceLock() { _mspace->unlock(); }
+};
+
+template <typename Mspace>
 inline typename Mspace::Type* mspace_allocate_transient_to_full(size_t size, Mspace* mspace, Thread* thread) {
   typename Mspace::Type* const t = mspace_allocate_transient(size, mspace, thread);
   if (t == NULL) return NULL;
@@ -344,6 +399,20 @@
 }
 
 template <typename Mspace>
+class ReleaseOp : public StackObj {
+ private:
+  Mspace* _mspace;
+  Thread* _thread;
+  bool _release_full;
+ public:
+  typedef typename Mspace::Type Type;
+  ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) :
+    _mspace(mspace), _thread(thread), _release_full(release_full) {}
+  bool process(Type* t);
+  size_t processed() const { return 0; }
+};
+
+template <typename Mspace>
 inline bool ReleaseOp<Mspace>::process(typename Mspace::Type* t) {
   assert(t != NULL, "invariant");
   // assumes some means of exclusive access to t
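For reference, a minimal stand-alone sketch of the allocation-size alignment performed by align_allocation_size above: the mspace minimum element size is doubled until it covers the request, so the result is the minimum size times the smallest sufficient power of two. The page-size assertions are omitted here because they need the VM; the arithmetic itself is the same.

#include <cstddef>

// Simplified model of align_allocation_size (no os::vm_page_size() checks).
static size_t align_allocation_size_sketch(size_t requested_size, size_t min_elem_size) {
  size_t alloc_size_bytes = min_elem_size;
  while (requested_size > alloc_size_bytes) {
    alloc_size_bytes <<= 1;   // double until the request fits
  }
  return alloc_size_bytes;
}

// Example: with a 4096-byte minimum, a 9000-byte request is rounded up to
// 16384 bytes (4096 -> 8192 -> 16384).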
--- a/src/hotspot/share/jfr/support/jfrKlassExtension.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/support/jfrKlassExtension.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -36,6 +36,7 @@
 #define JDK_JFR_EVENT_SUBKLASS 16
 #define JDK_JFR_EVENT_KLASS    32
 #define EVENT_HOST_KLASS       64
+#define EVENT_RESERVED         128
 #define IS_EVENT_KLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)) != 0)
 #define ON_KLASS_CREATION(k, p, t) if (IS_EVENT_KLASS(k)) JfrEventClassTransformer::on_klass_creation(k, p, t)
 
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -25,13 +25,13 @@
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/storage/jfrStorage.hpp"
-#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
 #include "jfr/support/jfrThreadLocal.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/os.hpp"
@@ -46,7 +46,7 @@
   _shelved_buffer(NULL),
   _stackframes(NULL),
   _trace_id(JfrTraceId::assign_thread_id()),
-  _thread_cp(),
+  _thread(),
   _data_lost(0),
   _stack_trace_id(max_julong),
   _user_time(0),
@@ -62,17 +62,17 @@
   return _data_lost;
 }
 
-bool JfrThreadLocal::has_thread_checkpoint() const {
-  return _thread_cp.valid();
+bool JfrThreadLocal::has_thread_blob() const {
+  return _thread.valid();
 }
 
-void JfrThreadLocal::set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) {
-  assert(!_thread_cp.valid(), "invariant");
-  _thread_cp = ref;
+void JfrThreadLocal::set_thread_blob(const JfrBlobHandle& ref) {
+  assert(!_thread.valid(), "invariant");
+  _thread = ref;
 }
 
-const JfrCheckpointBlobHandle& JfrThreadLocal::thread_checkpoint() const {
-  return _thread_cp;
+const JfrBlobHandle& JfrThreadLocal::thread_blob() const {
+  return _thread;
 }
 
 static void send_java_thread_start_event(JavaThread* jt) {
@@ -95,10 +95,12 @@
   assert(jt != NULL, "invariant");
   assert(Thread::current() == jt, "invariant");
   assert(jt->jfr_thread_local()->trace_id() == id, "invariant");
-  EventThreadEnd event;
-  event.set_thread(id);
-  event.commit();
-  JfrThreadCPULoadEvent::send_event_for_thread(jt);
+  if (JfrRecorder::is_recording()) {
+    EventThreadEnd event;
+    event.set_thread(id);
+    event.commit();
+    JfrThreadCPULoadEvent::send_event_for_thread(jt);
+  }
 }
 
 void JfrThreadLocal::release(JfrThreadLocal* tl, Thread* t) {
@@ -125,10 +127,10 @@
   assert(t != NULL, "invariant");
   JfrThreadLocal * const tl = t->jfr_thread_local();
   assert(!tl->is_dead(), "invariant");
-  if (JfrRecorder::is_recording()) {
-    if (t->is_Java_thread()) {
-      send_java_thread_end_events(tl->thread_id(), (JavaThread*)t);
-    }
+  if (t->is_Java_thread()) {
+    JavaThread* const jt = (JavaThread*)t;
+    ObjectSampleCheckpoint::on_thread_exit(jt);
+    send_java_thread_end_events(tl->thread_id(), jt);
   }
   release(tl, Thread::current()); // because it could be that Thread::current() != t
 }
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -25,7 +25,7 @@
 #ifndef SHARE_JFR_SUPPORT_JFRTHREADLOCAL_HPP
 #define SHARE_JFR_SUPPORT_JFRTHREADLOCAL_HPP
 
-#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 
 class JavaThread;
@@ -41,7 +41,7 @@
   JfrBuffer* _shelved_buffer;
   mutable JfrStackFrame* _stackframes;
   mutable traceid _trace_id;
-  JfrCheckpointBlobHandle _thread_cp;
+  JfrBlobHandle _thread;
   u8 _data_lost;
   traceid _stack_trace_id;
   jlong _user_time;
@@ -207,9 +207,9 @@
     return _dead;
   }
 
-  bool has_thread_checkpoint() const;
-  void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle);
-  const JfrCheckpointBlobHandle& thread_checkpoint() const;
+  bool has_thread_blob() const;
+  void set_thread_blob(const JfrBlobHandle& handle);
+  const JfrBlobHandle& thread_blob() const;
 
   static void on_start(Thread* t);
   static void on_exit(Thread* t);
--- a/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -43,39 +43,46 @@
 
 class JfrTraceFlag {
  private:
-  mutable jbyte _flags;
+  mutable jshort _flags;
  public:
   JfrTraceFlag() : _flags(0) {}
-  explicit JfrTraceFlag(jbyte flags) : _flags(flags) {}
-  void set_flag(jbyte flag) const {
-    _flags |= flag;
-  }
-  void clear_flag(jbyte flag) const {
-    _flags &= (~flag);
-  }
-  jbyte flags() const { return _flags; }
-  bool is_set(jbyte flag) const {
+  bool is_set(jshort flag) const {
     return (_flags & flag) != 0;
   }
-  jbyte* const flags_addr() const {
-    return &_flags;
+
+  jshort flags() const {
+    return _flags;
+  }
+
+  void set_flags(jshort flags) const {
+    _flags = flags;
+  }
+
+  jbyte* flags_addr() const {
+    return (jbyte*)&_flags;
+  }
+  jbyte* meta_addr() const {
+    return ((jbyte*)&_flags) + 1;
   }
 };
 
 #define DEFINE_TRACE_FLAG mutable JfrTraceFlag _trace_flags
 
 #define DEFINE_TRACE_FLAG_ACCESSOR                 \
-  void set_trace_flag(jbyte flag) const {          \
-    _trace_flags.set_flag(flag);                   \
+  bool is_trace_flag_set(jshort flag) const {      \
+    return _trace_flags.is_set(flag);              \
   }                                                \
-  jbyte trace_flags() const {                      \
+  jshort trace_flags() const {                     \
     return _trace_flags.flags();                   \
   }                                                \
-  bool is_trace_flag_set(jbyte flag) const {       \
-    return _trace_flags.is_set(flag);              \
+  void set_trace_flags(jshort flags) const {       \
+    _trace_flags.set_flags(flags);                 \
   }                                                \
-  jbyte* const trace_flags_addr() const {          \
+  jbyte* trace_flags_addr() const {                \
     return _trace_flags.flags_addr();              \
+  }                                                \
+  jbyte* trace_meta_addr() const {                 \
+    return _trace_flags.meta_addr();               \
   }
 
 #endif // SHARE_JFR_SUPPORT_JFRTRACEIDEXTENSION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/utilities/jfrBlob.cpp	Sat Sep 14 14:40:09 2019 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
+
+JfrBlob::JfrBlob(const u1* checkpoint, size_t size) :
+  _data(JfrCHeapObj::new_array<u1>(size)),
+  _next(),
+  _size(size),
+  _written(false) {
+  assert(_data != NULL, "invariant");
+  memcpy(const_cast<u1*>(_data), checkpoint, size);
+}
+
+JfrBlob::~JfrBlob() {
+  JfrCHeapObj::free(const_cast<u1*>(_data), _size);
+}
+
+void JfrBlob::reset_write_state() const {
+  if (!_written) {
+    return;
+  }
+  _written = false;
+  if (_next.valid()) {
+    _next->reset_write_state();
+  }
+}
+
+void JfrBlob::set_next(const JfrBlobHandle& ref) {
+  if (_next == ref) {
+    return;
+  }
+  assert(_next != ref, "invariant");
+  if (_next.valid()) {
+    _next->set_next(ref);
+    return;
+  }
+  _next = ref;
+}
+
+JfrBlobHandle JfrBlob::make(const u1* data, size_t size) {
+  const JfrBlob* const blob = new JfrBlob(data, size);
+  assert(blob != NULL, "invariant");
+  return JfrBlobReference::make(blob);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/utilities/jfrBlob.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_UTILITIES_JFRBLOB_HPP
+#define SHARE_JFR_UTILITIES_JFRBLOB_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrRefCountPointer.hpp"
+
+class JfrBlob;
+typedef RefCountPointer<JfrBlob, MultiThreadedRefCounter> JfrBlobReference;
+typedef RefCountHandle<JfrBlobReference> JfrBlobHandle;
+
+class JfrBlob : public JfrCHeapObj {
+  template <typename, typename>
+  friend class RefCountPointer;
+ private:
+  const u1* const _data;
+  JfrBlobHandle _next;
+  const size_t _size;
+  mutable bool _written;
+
+  JfrBlob(const u1* data, size_t size);
+  ~JfrBlob();
+
+ public:
+  void set_next(const JfrBlobHandle& ref);
+  void reset_write_state() const;
+  static JfrBlobHandle make(const u1* data, size_t size);
+  template <typename Writer>
+  void write(Writer& writer) const {
+    writer.bytes(_data, _size);
+    if (_next.valid()) {
+      _next->write(writer);
+    }
+  }
+  template <typename Writer>
+  void exclusive_write(Writer& writer) const {
+    if (_written) {
+      return;
+    }
+    writer.bytes(_data, _size);
+    _written = true;
+    if (_next.valid()) {
+      _next->exclusive_write(writer);
+    }
+  }
+};
+
+#endif // SHARE_JFR_UTILITIES_JFRBLOB_HPP
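For reference, a minimal stand-alone model of the JfrBlob write semantics introduced above. Blob and ByteSink below are hypothetical simplifications (the real JfrBlob is reference counted and C-heap allocated); they only illustrate how write() always emits the whole chain while exclusive_write() emits each blob at most once until reset_write_state() clears the flags.

#include <cstdio>
#include <cstddef>

struct ByteSink {                                  // hypothetical writer with a bytes() method
  void bytes(const unsigned char* data, size_t size) {
    for (size_t i = 0; i < size; ++i) { printf("%02x", data[i]); }
    printf("\n");
  }
};

struct Blob {
  const unsigned char* data;
  size_t size;
  const Blob* next;
  mutable bool written;

  void write(ByteSink& sink) const {               // unconditional: emit self, then the rest of the chain
    sink.bytes(data, size);
    if (next != nullptr) { next->write(sink); }
  }
  void exclusive_write(ByteSink& sink) const {     // emit each blob at most once
    if (written) { return; }
    sink.bytes(data, size);
    written = true;
    if (next != nullptr) { next->exclusive_write(sink); }
  }
  void reset_write_state() const {                 // clear the written flags down the chain
    if (!written) { return; }
    written = false;
    if (next != nullptr) { next->reset_write_state(); }
  }
};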
--- a/src/hotspot/share/jfr/utilities/jfrHashtable.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/utilities/jfrHashtable.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -25,13 +25,13 @@
 #ifndef SHARE_JFR_UTILITIES_JFRHASHTABLE_HPP
 #define SHARE_JFR_UTILITIES_JFRHASHTABLE_HPP
 
-#include "memory/allocation.inline.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
 #include "runtime/orderAccess.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
 template <typename T>
-class JfrBasicHashtableEntry {
+class JfrBasicHashtableEntry : public JfrCHeapObj {
  private:
   typedef JfrBasicHashtableEntry<T> Entry;
   Entry* _next;
@@ -39,8 +39,8 @@
   uintptr_t _hash;
 
  public:
+  JfrBasicHashtableEntry(uintptr_t hash, const T& data) : _next(NULL), _literal(data), _hash(hash) {}
   uintptr_t hash() const { return _hash; }
-  void set_hash(uintptr_t hash) { _hash = hash; }
   T literal() const { return _literal; }
   T* literal_addr() { return &_literal; }
   void set_literal(T s) { _literal = s; }
@@ -93,7 +93,6 @@
   }
   void free_buckets() {
     FREE_C_HEAP_ARRAY(Bucket, _buckets);
-    _buckets = NULL;
   }
   TableEntry* bucket(size_t i) { return _buckets[i].get_entry();}
   TableEntry** bucket_addr(size_t i) { return _buckets[i].entry_addr(); }
@@ -108,18 +107,18 @@
 };
 
 template <typename IdType, typename Entry, typename T>
-class AscendingId : public CHeapObj<mtTracing>  {
+class AscendingId : public JfrCHeapObj  {
  private:
   IdType _id;
  public:
   AscendingId() : _id(0) {}
   // callbacks
-  void assign_id(Entry* entry) {
+  void on_link(Entry* entry) {
     assert(entry != NULL, "invariant");
     assert(entry->id() == 0, "invariant");
     entry->set_id(++_id);
   }
-  bool equals(const T& data, uintptr_t hash, const Entry* entry) {
+  bool on_equals(uintptr_t hash, const Entry* entry) {
     assert(entry->hash() == hash, "invariant");
     return true;
   }
@@ -127,18 +126,16 @@
 
 // IdType must be scalar
 template <typename T, typename IdType>
-class Entry : public JfrBasicHashtableEntry<T> {
+class JfrHashtableEntry : public JfrBasicHashtableEntry<T> {
  public:
+  JfrHashtableEntry(uintptr_t hash, const T& data) : JfrBasicHashtableEntry<T>(hash, data), _id(0) {}
   typedef IdType ID;
-  void init() { _id = 0; }
   ID id() const { return _id; }
-  void set_id(ID id) { _id = id; }
-  void set_value(const T& value) { this->set_literal(value); }
-  T& value() const { return *const_cast<Entry*>(this)->literal_addr();}
-  const T* value_addr() const { return const_cast<Entry*>(this)->literal_addr(); }
-
+  void set_id(ID id) const { _id = id; }
+  T& value() const { return *const_cast<JfrHashtableEntry*>(this)->literal_addr();}
+  const T* value_addr() const { return const_cast<JfrHashtableEntry*>(this)->literal_addr(); }
  private:
-  ID _id;
+  mutable ID _id;
 };
 
 template <typename T, typename IdType, template <typename, typename> class Entry,
@@ -147,29 +144,28 @@
 class HashTableHost : public JfrBasicHashtable<T> {
  public:
   typedef Entry<T, IdType> HashEntry;
-  HashTableHost() : _callback(new Callback()) {}
-  HashTableHost(Callback* cb) : JfrBasicHashtable<T>(TABLE_SIZE, sizeof(HashEntry)), _callback(cb) {}
+  HashTableHost(size_t size = 0) : JfrBasicHashtable<T>(size == 0 ? TABLE_SIZE : size, sizeof(HashEntry)), _callback(new Callback()) {}
+  HashTableHost(Callback* cb, size_t size = 0) : JfrBasicHashtable<T>(size == 0 ? TABLE_SIZE : size, sizeof(HashEntry)), _callback(cb) {}
   ~HashTableHost() {
     this->clear_entries();
     this->free_buckets();
   }
 
   // direct insert assumes non-existing entry
-  HashEntry& put(const T& data, uintptr_t hash);
+  HashEntry& put(uintptr_t hash, const T& data);
 
   // lookup entry, will put if not found
-  HashEntry& lookup_put(const T& data, uintptr_t hash) {
-    HashEntry* entry = lookup_only(data, hash);
-    return entry == NULL ? put(data, hash) : *entry;
+  HashEntry& lookup_put(uintptr_t hash, const T& data) {
+    HashEntry* entry = lookup_only(hash);
+    return entry == NULL ? put(hash, data) : *entry;
   }
 
-  // read-only lookup
-  HashEntry* lookup_only(const T& query, uintptr_t hash);
+  HashEntry* lookup_only(uintptr_t hash);
 
   // id retrieval
-  IdType id(const T& data, uintptr_t hash) {
+  IdType id(uintptr_t hash, const T& data) {
     assert(data != NULL, "invariant");
-    const HashEntry& entry = lookup_put(data, hash);
+    const HashEntry& entry = lookup_put(hash, data);
     assert(entry.id() > 0, "invariant");
     return entry.id();
   }
@@ -188,34 +184,35 @@
   void free_entry(HashEntry* entry) {
     assert(entry != NULL, "invariant");
     JfrBasicHashtable<T>::unlink_entry(entry);
-    FREE_C_HEAP_ARRAY(char, entry);
+    _callback->on_unlink(entry);
+    delete entry;
   }
 
  private:
   Callback* _callback;
   size_t index_for(uintptr_t hash) { return this->hash_to_index(hash); }
-  HashEntry* new_entry(const T& data, uintptr_t hash);
+  HashEntry* new_entry(uintptr_t hash, const T& data);
   void add_entry(size_t index, HashEntry* new_entry) {
     assert(new_entry != NULL, "invariant");
-    _callback->assign_id(new_entry);
+    _callback->on_link(new_entry);
     assert(new_entry->id() > 0, "invariant");
     JfrBasicHashtable<T>::add_entry(index, new_entry);
   }
 };
 
 template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
-Entry<T, IdType>& HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::put(const T& data, uintptr_t hash) {
-  assert(lookup_only(data, hash) == NULL, "use lookup_put()");
-  HashEntry* const entry = new_entry(data, hash);
+Entry<T, IdType>& HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::put(uintptr_t hash, const T& data) {
+  assert(lookup_only(hash) == NULL, "use lookup_put()");
+  HashEntry* const entry = new_entry(hash, data);
   add_entry(index_for(hash), entry);
   return *entry;
 }
 
 template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
-Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::lookup_only(const T& query, uintptr_t hash) {
+Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::lookup_only(uintptr_t hash) {
   HashEntry* entry = (HashEntry*)this->bucket(index_for(hash));
   while (entry != NULL) {
-    if (entry->hash() == hash && _callback->equals(query, hash, entry)) {
+    if (entry->hash() == hash && _callback->on_equals(hash, entry)) {
       return entry;
     }
     entry = (HashEntry*)entry->next();
@@ -267,13 +264,10 @@
 }
 
 template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
-Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::new_entry(const T& data, uintptr_t hash) {
+Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::new_entry(uintptr_t hash, const T& data) {
   assert(sizeof(HashEntry) == this->entry_size(), "invariant");
-  HashEntry* const entry = (HashEntry*) NEW_C_HEAP_ARRAY2(char, this->entry_size(), mtTracing, CURRENT_PC);
-  entry->init();
-  entry->set_hash(hash);
-  entry->set_value(data);
-  entry->set_next(NULL);
+  HashEntry* const entry = new HashEntry(hash, data);
+  assert(entry != NULL, "invariant");
   assert(0 == entry->id(), "invariant");
   return entry;
 }
--- a/src/hotspot/share/jfr/utilities/jfrTypes.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/utilities/jfrTypes.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -26,8 +26,6 @@
 #define SHARE_JFR_UTILITIES_JFRTYPES_HPP
 
 #include "jfrfiles/jfrEventIds.hpp"
-#include "memory/allocation.hpp"
-#include "utilities/globalDefinitions.hpp"
 
 typedef u8 traceid;
 typedef int fio_fd;
@@ -37,6 +35,14 @@
 const u4 MIN_STACK_DEPTH = 1;
 const u4 MAX_STACK_DEPTH = 2048;
 
+inline int compare_traceid(const traceid& lhs, const traceid& rhs) {
+  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
+}
+
+inline int sort_traceid(traceid* lhs, traceid* rhs) {
+  return compare_traceid(*lhs, *rhs);
+}
+
 enum EventStartTime {
   UNTIMED,
   TIMED
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/writers/jfrTypeWriterHost.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
+#define SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
+
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.hpp"
+
+template <typename WriterImpl, u4 ID>
+class JfrTypeWriterHost : public StackObj {
+ private:
+  WriterImpl _impl;
+  JfrCheckpointWriter* _writer;
+  JfrCheckpointContext _ctx;
+  int64_t _count_offset;
+  int _count;
+  bool _skip_header;
+ public:
+  JfrTypeWriterHost(JfrCheckpointWriter* writer,
+                    bool class_unload = false,
+                    bool skip_header = false) : _impl(writer, class_unload),
+                                                _writer(writer),
+                                                _ctx(writer->context()),
+                                                _count(0),
+                                                _skip_header(skip_header) {
+    assert(_writer != NULL, "invariant");
+    if (!_skip_header) {
+      _writer->write_type((JfrTypeId)ID);
+      _count_offset = _writer->reserve(sizeof(u4)); // Don't know how many yet
+    }
+  }
+
+  ~JfrTypeWriterHost() {
+    if (_count == 0) {
+      // nothing written, restore context for rewind
+      _writer->set_context(_ctx);
+      return;
+    }
+    assert(_count > 0, "invariant");
+    if (!_skip_header) {
+      _writer->write_count(_count, _count_offset);
+    }
+  }
+
+  bool operator()(typename WriterImpl::Type const & value) {
+    this->_count += _impl(value);
+    return true;
+  }
+
+  int count() const   { return _count; }
+  void add(int count) { _count += count; }
+};
+
+typedef int(*type_write_operation)(JfrCheckpointWriter*, const void*);
+
+template <typename T, type_write_operation op>
+class JfrTypeWriterImplHost {
+ private:
+  JfrCheckpointWriter* _writer;
+ public:
+  typedef T Type;
+  JfrTypeWriterImplHost(JfrCheckpointWriter* writer, bool class_unload = false) : _writer(writer) {}
+  int operator()(T const& value) {
+    return op(this->_writer, value);
+  }
+};
+
+template <typename T, typename Predicate, type_write_operation op>
+class JfrPredicatedTypeWriterImplHost : public JfrTypeWriterImplHost<T, op> {
+ private:
+  Predicate _predicate;
+  typedef JfrTypeWriterImplHost<T, op> Parent;
+ public:
+  JfrPredicatedTypeWriterImplHost(JfrCheckpointWriter* writer, bool class_unload = false) :
+    Parent(writer), _predicate(class_unload) {}
+  int operator()(T const& value) {
+    return _predicate(value) ? Parent::operator()(value) : 0;
+  }
+};
+
+#endif // SHARE_JFR_WRITERS_JFRTYPEWRITERHOST_HPP
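For reference, a minimal stand-alone model of the reserve-then-patch idiom used by JfrTypeWriterHost above: space for the entry count is reserved up front, entries are written and counted, and the destructor either patches in the real count or rewinds the writer to its saved context when nothing was written. MockWriter and CountedTypeWriter are hypothetical stand-ins, not the real JfrCheckpointWriter API.

#include <cstdint>
#include <cstddef>
#include <vector>

struct MockWriter {                                    // hypothetical in-memory writer
  std::vector<uint32_t> buf;
  size_t context() const { return buf.size(); }        // saved position for rewind
  void set_context(size_t pos) { buf.resize(pos); }    // rewind to saved position
  size_t reserve() { buf.push_back(0); return buf.size() - 1; }  // placeholder count slot
  void write(uint32_t v) { buf.push_back(v); }
  void write_count(uint32_t count, size_t offset) { buf[offset] = count; }
};

class CountedTypeWriter {
 private:
  MockWriter& _writer;
  size_t _ctx;           // context captured before the count slot is reserved
  size_t _count_offset;
  uint32_t _count;
 public:
  explicit CountedTypeWriter(MockWriter& writer) :
    _writer(writer), _ctx(writer.context()), _count_offset(writer.reserve()), _count(0) {}
  void add(uint32_t value) { _writer.write(value); ++_count; }
  ~CountedTypeWriter() {
    if (_count == 0) {
      _writer.set_context(_ctx);                       // nothing written: rewind everything
      return;
    }
    _writer.write_count(_count, _count_offset);        // patch the reserved count slot
  }
};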
--- a/src/hotspot/share/jfr/writers/jfrWriterHost.inline.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/writers/jfrWriterHost.inline.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -170,7 +170,7 @@
   }
   if (this->available_size() < requested + size_safety_cushion) {
     if (!this->accommodate(this->used_size(), requested + size_safety_cushion)) {
-      this->cancel();
+      assert(!this->is_valid(), "invariant");
       return NULL;
     }
   }