old object sample blob:ification and serialization support

branch:      JEP-349-branch
changeset:   57983:a57907813a83
parent:      57971:aa7b1ea52413
child:       57984:269bbe414580
author:      mgronlun
date:        Mon, 02 Sep 2019 19:42:46 +0200

description:
old object sample blob:ification and serialization support

files:
src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp
src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp
src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp
src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp
src/hotspot/share/jfr/leakprofiler/sampling/sampleList.hpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.hpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp
src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp
src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp
src/hotspot/share/jfr/recorder/repository/jfrChunkState.hpp
src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp
src/hotspot/share/jfr/support/jfrThreadLocal.cpp
src/hotspot/share/jfr/support/jfrThreadLocal.hpp
src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp
src/hotspot/share/jfr/utilities/jfrBlob.cpp
src/hotspot/share/jfr/utilities/jfrBlob.hpp
src/hotspot/share/jfr/utilities/jfrHashtable.hpp
src/hotspot/share/jfr/utilities/jfrTypes.hpp
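
The core of the change generalizes the leak profiler's checkpoint blobs (JfrCheckpointBlob / JfrCheckpointBlobHandle) into the reusable JfrBlob / JfrBlobHandle under jfr/utilities, and each ObjectSample now holds three independent blob handles: a stacktrace blob, a thread blob and a type-set blob. The following is a minimal, self-contained sketch of the chained, write-once blob pattern that the removed jfrCheckpointBlob.cpp implements and the new JfrBlob carries over; Writer, Blob and main are illustrative stand-ins, not the actual JFR types.

    // Sketch of the chained, write-once blob pattern (simplified: std::shared_ptr
    // stands in for the refcounted JfrBlobHandle, Writer for JfrCheckpointWriter).
    #include <cstddef>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Writer {
      std::vector<unsigned char> sink;
      void bytes(const unsigned char* data, std::size_t size) {
        sink.insert(sink.end(), data, data + size);
      }
    };

    class Blob {
      std::vector<unsigned char> _data;
      std::shared_ptr<Blob> _next;     // blobs can be chained via set_next()
      mutable bool _written = false;
     public:
      Blob(const unsigned char* data, std::size_t size) : _data(data, data + size) {}
      // Append another blob to the tail of the chain.
      void set_next(const std::shared_ptr<Blob>& next) {
        if (_next) { _next->set_next(next); } else { _next = next; }
      }
      // Write each blob in the chain at most once until the state is reset.
      void exclusive_write(Writer& w) const {
        if (!_written) { w.bytes(_data.data(), _data.size()); _written = true; }
        if (_next) { _next->exclusive_write(w); }
      }
      void reset_write_state() const {
        _written = false;
        if (_next) { _next->reset_write_state(); }
      }
    };

    int main() {
      const unsigned char a[] = "type-set #1";
      const unsigned char b[] = "type-set #2";
      auto blob = std::make_shared<Blob>(a, sizeof a);
      blob->set_next(std::make_shared<Blob>(b, sizeof b)); // like ObjectSample::set_type_set()

      Writer w;
      blob->exclusive_write(w);   // writes both blobs in the chain
      blob->exclusive_write(w);   // writes nothing: both are already marked written
      std::printf("bytes written: %zu\n", w.sink.size());
      blob->reset_write_state();  // re-arm the chain for the next serialization pass
      return 0;
    }

In the patch, ObjectSample::set_type_set() relies on exactly this chaining (a sample that already holds a valid type-set handle appends the new blob via set_next), while BlobWriter in objectSampleCheckpoint.cpp makes one pass with exclusive_write and a second pass, after set_reset(), to re-arm the shared blobs.
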
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -55,13 +55,13 @@
   return !_edges->has_entries();
 }
 
-void EdgeStore::assign_id(EdgeEntry* entry) {
+void EdgeStore::link(EdgeEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
   entry->set_id(++_edge_id_counter);
 }
 
-bool EdgeStore::equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry) {
+bool EdgeStore::equals(uintptr_t hash, const EdgeEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->hash() == hash, "invariant");
   return true;
@@ -80,22 +80,21 @@
 
 StoredEdge* EdgeStore::get(const oop* reference) const {
   assert(reference != NULL, "invariant");
-  const StoredEdge e(NULL, reference);
-  EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
+  EdgeEntry* const entry = _edges->lookup_only((uintptr_t)reference);
   return entry != NULL ? entry->literal_addr() : NULL;
 }
 
 StoredEdge* EdgeStore::put(const oop* reference) {
   assert(reference != NULL, "invariant");
   const StoredEdge e(NULL, reference);
-  assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
-  EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
+  assert(NULL == _edges->lookup_only((uintptr_t)reference), "invariant");
+  EdgeEntry& entry = _edges->put((uintptr_t)reference, e);
   return entry.literal_addr();
 }
 
 traceid EdgeStore::get_id(const Edge* edge) const {
   assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  EdgeEntry* const entry = _edges->lookup_only((uintptr_t)edge->reference());
   assert(entry != NULL, "invariant");
   return entry->id();
 }
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -58,7 +58,7 @@
 };
 
 class EdgeStore : public CHeapObj<mtTracing> {
-  typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
+  typedef HashTableHost<StoredEdge, traceid, JfrHashtableEntry, EdgeStore> EdgeHashTable;
   typedef EdgeHashTable::HashEntry EdgeEntry;
   template <typename,
             typename,
@@ -74,8 +74,8 @@
   EdgeHashTable* _edges;
 
   // Hash table callbacks
-  void assign_id(EdgeEntry* entry);
-  bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
+  void link(EdgeEntry* entry);
+  bool equals(uintptr_t hash, const EdgeEntry* entry);
   void unlink(EdgeEntry* entry);
 
   StoredEdge* get(const oop* reference) const;
--- a/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -43,7 +43,6 @@
 #include "jfr/leakprofiler/utilities/granularTimer.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
-
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "utilities/globalDefinitions.hpp"
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -31,12 +31,11 @@
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSample.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/metadata/jfrSerializer.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/utilities/jfrHashtable.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
@@ -64,23 +63,13 @@
   return mutable_predicate(set, id);
 }
 
-const int initial_array_size = 256;
+const int initial_array_size = 64;
 
 template <typename T>
 static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
   return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
 }
 
-template <typename T>
-static GrowableArray<T>* resource_allocate_array(int size = initial_array_size) {
-  return new GrowableArray<T>(size);
-}
-
-static void sort_array(GrowableArray<traceid>* ar) {
-  assert(ar != NULL, "invariant");
-  ar->sort(sort_traceid);
-}
-
 static GrowableArray<traceid>* unloaded_thread_id_set = NULL;
 
 class ThreadIdExclusiveAccess : public StackObj {
@@ -93,6 +82,11 @@
 
 Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);
 
+static bool has_thread_exited(traceid tid) {
+  assert(tid != 0, "invariant");
+  return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
+}
+
 static void add_to_unloaded_thread_set(traceid tid) {
   ThreadIdExclusiveAccess lock;
   if (unloaded_thread_id_set == NULL) {
@@ -101,24 +95,22 @@
   add(unloaded_thread_id_set, tid);
 }
 
-static bool has_thread_exited(traceid tid) {
-  assert(tid != 0, "invariant");
-  return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
+void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  if (LeakProfiler::is_running()) {
+    add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id());
+  }
 }
 
+// Track the set of unloaded klasses during a chunk / epoch.
+// Methods in stacktraces belonging to unloaded klasses must not be accessed.
 static GrowableArray<traceid>* unloaded_klass_set = NULL;
 
-static void sort_unloaded_klass_set() {
-  if (unloaded_klass_set != NULL) {
-    sort_array(unloaded_klass_set);
-  }
-}
-
 static void add_to_unloaded_klass_set(traceid klass_id) {
   if (unloaded_klass_set == NULL) {
     unloaded_klass_set = c_heap_allocate_array<traceid>();
   }
-  unloaded_klass_set->append(klass_id);
+  add(unloaded_klass_set, klass_id);
 }
 
 void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
@@ -126,36 +118,8 @@
   add_to_unloaded_klass_set(TRACE_ID(k));
 }
 
-static bool is_klass_unloaded(traceid klass_id) {
-  return unloaded_klass_set != NULL && predicate(unloaded_klass_set, klass_id);
-}
-
-static GrowableArray<traceid>* id_set = NULL;
-static GrowableArray<traceid>* stack_trace_id_set = NULL;
-
-static bool is_processed(traceid id) {
-  assert(id != 0, "invariant");
-  assert(id_set != NULL, "invariant");
-  return mutable_predicate(id_set, id);
-}
-
-static bool is_processed_or_unloaded(traceid klass_id) {
-  assert(klass_id != 0, "invariant");
-  return is_processed(klass_id) || is_klass_unloaded(klass_id);
-}
-
-static bool should_process(traceid klass_id) {
-  return klass_id != 0 && !is_processed_or_unloaded(klass_id);
-}
-
-static bool is_stack_trace_processed(traceid stack_trace_id) {
-  assert(stack_trace_id != 0, "invariant");
-  assert(stack_trace_id_set != NULL, "invariant");
-  return mutable_predicate(stack_trace_id_set, stack_trace_id);
-}
-
 template <typename Processor>
-static void do_samples(ObjectSample* sample, const ObjectSample* const end, Processor& processor) {
+static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
   assert(sample != NULL, "invariant");
   while (sample != end) {
     processor.sample_do(sample);
@@ -172,40 +136,6 @@
   do_samples(last, all ? NULL : sampler->last_resolved(), processor);
 }
 
-void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
-  assert(jt != NULL, "invariant");
-  if (LeakProfiler::is_running()) {
-    add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id());
-  }
-}
-
-class CheckpointBlobInstaller {
- private:
-  const JfrCheckpointBlobHandle& _cp;
- public:
-  CheckpointBlobInstaller(const JfrCheckpointBlobHandle& cp) : _cp(cp) {}
-  void sample_do(ObjectSample* sample) {
-    if (!sample->is_dead()) {
-      sample->set_klass_checkpoint(_cp);
-    }
-  }
-};
-
-static void install_checkpoint_blob(JfrCheckpointWriter& writer) {
-  assert(writer.has_data(), "invariant");
-  const JfrCheckpointBlobHandle h_cp = writer.copy();
-  CheckpointBlobInstaller installer(h_cp);
-  iterate_samples(installer, true);
-}
-
-void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  assert(LeakProfiler::is_running(), "invariant");
-  if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
-    install_checkpoint_blob(writer);
-  }
-}
-
 class SampleMarker {
  private:
   ObjectSampleMarker& _marker;
@@ -234,97 +164,52 @@
   return sample_marker.count();
 }
 
-#ifdef ASSERT
-static traceid get_klass_id(const Klass* k) {
-  assert(k != NULL, "invariant");
-  return TRACE_ID(k);
-}
-#endif
-
-static traceid get_klass_id(traceid method_id) {
-  assert(method_id != 0, "invariant");
-  return method_id >> TRACE_ID_SHIFT;
-}
-
-static int get_method_id_num(traceid method_id) {
-  return (int)(method_id & METHOD_ID_NUM_MASK);
-}
+class BlobCache {
+  typedef HashTableHost<JfrBlobHandle, traceid, JfrHashtableEntry, BlobCache> BlobTable;
+  typedef BlobTable::HashEntry BlobEntry;
+ private:
+  BlobTable _table;
+  traceid _lookup_id;
+ public:
+  BlobCache(size_t size) : _table(this, size), _lookup_id(0) {}
+  JfrBlobHandle get(const ObjectSample* sample);
+  void put(const ObjectSample* sample, const JfrBlobHandle& blob);
+  // Hash table callbacks
+  void link(const BlobEntry* entry) const;
+  bool equals(uintptr_t hash, const BlobEntry* entry) const;
+  void unlink(BlobEntry* entry) const;
+};
 
-static Method* lookup_method_in_klasses(Klass* klass, int orig_method_id_num) {
-  assert(klass != NULL, "invariant");
-  assert(!is_klass_unloaded(get_klass_id(klass)), "invariant");
-  while (klass != NULL) {
-    if (klass->is_instance_klass()) {
-      Method* const m = InstanceKlass::cast(klass)->method_with_orig_idnum(orig_method_id_num);
-      if (m != NULL) {
-        return m;
-      }
-    }
-    klass = klass->super();
-  }
-  return NULL;
-}
-
-static Method* lookup_method_in_interfaces(Klass* klass, int orig_method_id_num) {
-  assert(klass != NULL, "invariant");
-  const Array<InstanceKlass*>* const all_ifs = InstanceKlass::cast(klass)->transitive_interfaces();
-  const int num_ifs = all_ifs->length();
-  for (int i = 0; i < num_ifs; i++) {
-    InstanceKlass* const ik = all_ifs->at(i);
-    Method* const m = ik->method_with_orig_idnum(orig_method_id_num);
-    if (m != NULL) {
-      return m;
-    }
-  }
-  return NULL;
+JfrBlobHandle BlobCache::get(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  _lookup_id = sample->stack_trace_id();
+  assert(_lookup_id != 0, "invariant");
+  BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash());
+  return entry != NULL ? entry->literal() : JfrBlobHandle();
 }
 
-static Method* lookup_method(Klass* klass, int orig_method_id_num) {
-  Method* m = lookup_method_in_klasses(klass, orig_method_id_num);
-  if (m == NULL) {
-    m = lookup_method_in_interfaces(klass, orig_method_id_num);
-  }
-  assert(m != NULL, "invariant");
-  return m;
-}
-
-static void write_stack_trace(traceid id, bool reached_root, u4 nr_of_frames, JfrCheckpointWriter* writer) {
-  assert(writer != NULL, "invariant");
-  writer->write(id);
-  writer->write((u1)!reached_root);
-  writer->write(nr_of_frames);
-}
-
-static void write_stack_frame(const JfrStackFrame* frame, JfrCheckpointWriter* writer) {
-  assert(frame != NULL, "invariant");
-  frame->write(*writer);
+void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) {
+  assert(sample != NULL, "invariant");
+  assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant");
+  _lookup_id = sample->stack_trace_id();
+  assert(_lookup_id != 0, "invariant");
+  _table.put(sample->stack_trace_hash(), blob);
 }
 
-bool ObjectSampleCheckpoint::tag(const JfrStackTrace* trace, JfrCheckpointWriter* writer /* NULL */) {
-  assert(trace != NULL, "invariant");
-  if (is_stack_trace_processed(trace->id())) {
-    return false;
-  }
-  if (writer != NULL) {
-    // JfrStackTrace
-    write_stack_trace(trace->id(), trace->_reached_root, trace->_nr_of_frames, writer);
-  }
-  traceid last_id = 0;
-  for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
-    if (writer != NULL) {
-      // JfrStackFrame(s)
-      write_stack_frame(&trace->_frames[i], writer);
-    }
-    const traceid method_id = trace->_frames[i]._methodid;
-    if (last_id == method_id || is_processed(method_id) || is_klass_unloaded(get_klass_id(method_id))) {
-      continue;
-    }
-    last_id = method_id;
-    InstanceKlass* const ik = trace->_frames[i]._klass;
-    assert(ik != NULL, "invariant");
-    JfrTraceId::use(ik, lookup_method(ik, get_method_id_num(method_id)));
-  }
-  return true;
+inline void BlobCache::link(const BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
+  assert(entry->id() == 0, "invariant");
+  entry->set_id(_lookup_id);
+}
+
+inline bool BlobCache::equals(uintptr_t hash, const BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
+  assert(entry->hash() == hash, "invariant");
+  return entry->id() == _lookup_id;
+}
+
+inline void BlobCache::unlink(BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
 }
 
 static bool stack_trace_precondition(const ObjectSample* sample) {
@@ -332,84 +217,68 @@
   return sample->has_stack_trace_id() && !sample->is_dead();
 }
 
-class StackTraceTagger {
+class StackTraceBlobInstaller {
  private:
-  JfrStackTraceRepository& _stack_trace_repo;
+  const JfrStackTraceRepository& _stack_trace_repo;
+  BlobCache _cache;
+  const JfrStackTrace* resolve(const ObjectSample* sample);
+  void install(ObjectSample* sample);
  public:
-  StackTraceTagger(JfrStackTraceRepository& stack_trace_repo) : _stack_trace_repo(stack_trace_repo) {}
+  StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo) :
+    _stack_trace_repo(stack_trace_repo),
+    _cache(JfrOptionSet::old_object_queue_size()) {}
   void sample_do(ObjectSample* sample) {
     if (stack_trace_precondition(sample)) {
-      assert(sample->stack_trace_id() == sample->stack_trace()->id(), "invariant");
-      ObjectSampleCheckpoint::tag(sample->stack_trace(), NULL);
+      install(sample);
     }
   }
 };
 
-static bool written = false;
-
-static void tag_old_stack_traces(ObjectSample* last_resolved, JfrStackTraceRepository& stack_trace_repo) {
-  assert(last_resolved != NULL, "invariant");
-  assert(stack_trace_id_set != NULL, "invariant");
-  assert(stack_trace_id_set->is_empty(), "invariant");
-  if (written) {
-    // written -> retagged
-    written = false;
-    return;
-  }
-  StackTraceTagger tagger(stack_trace_repo);
-  do_samples(last_resolved, NULL, tagger);
+const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) {
+  return _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id());
 }
 
-class StackTraceResolver {
- private:
-  JfrStackTraceRepository& _stack_trace_repo;
- public:
-  StackTraceResolver(JfrStackTraceRepository& stack_trace_repo) : _stack_trace_repo(stack_trace_repo) {}
-  void install_to_sample(ObjectSample* sample, const JfrStackTrace* stack_trace);
-  void sample_do(ObjectSample* sample) {
-    if (stack_trace_precondition(sample)) {
-      install_to_sample(sample, _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id()));
-    }
-  }
-};
-
 #ifdef ASSERT
 static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) {
   assert(sample != NULL, "invariant");
   assert(!sample->is_dead(), "invariant");
+  assert(!sample->has_stacktrace(), "invariant");
   assert(stack_trace != NULL, "invariant");
   assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant");
   assert(stack_trace->id() == sample->stack_trace_id(), "invariant");
 }
 #endif
 
-void StackTraceResolver::install_to_sample(ObjectSample* sample, const JfrStackTrace* stack_trace) {
+void StackTraceBlobInstaller::install(ObjectSample* sample) {
+  JfrBlobHandle blob = _cache.get(sample);
+  if (blob.valid()) {
+    sample->set_stacktrace(blob);
+    return;
+  }
+  const JfrStackTrace* stack_trace = resolve(sample);
   DEBUG_ONLY(validate_stack_trace(sample, stack_trace));
-  JfrStackTrace* const sample_stack_trace = const_cast<JfrStackTrace*>(sample->stack_trace());
-  if (sample_stack_trace != NULL) {
-    if (sample_stack_trace->id() != stack_trace->id()) {
-      *sample_stack_trace = *stack_trace; // copy
-    }
-  } else {
-    sample->set_stack_trace(new JfrStackTrace(stack_trace->id(), *stack_trace, NULL)); // new
-  }
-  assert(sample->stack_trace() != NULL, "invariant");
+  JfrCheckpointWriter writer;
+  writer.write_type(TYPE_STACKTRACE);
+  writer.write_count(1);
+  ObjectSampleCheckpoint::write_stacktrace(stack_trace, writer);
+  blob = writer.move();
+  _cache.put(sample, blob);
+  sample->set_stacktrace(blob);
 }
 
-static void allocate_traceid_working_sets() {
-  const int set_size = JfrOptionSet::old_object_queue_size();
-  stack_trace_id_set = resource_allocate_array<traceid>(set_size);
-  id_set = resource_allocate_array<traceid>(set_size);
-  sort_unloaded_klass_set();
+static GrowableArray<traceid>* id_set = NULL;
+
+static void allocate_traceid_working_set() {
+  id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
 }
 
-static void resolve_stack_traces(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo, const ObjectSample* last_resolved) {
+static void resolve_stack_traces(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
   assert(sampler != NULL, "invariant");
   const ObjectSample* const last = sampler->last();
-  if (last != last_resolved) {
-    StackTraceResolver stack_trace_resolver(stack_trace_repo);
-    iterate_samples(stack_trace_resolver);
-    sampler->set_last_resolved(last);
+  if (last != sampler->last_resolved()) {
+    allocate_traceid_working_set();
+    StackTraceBlobInstaller installer(stack_trace_repo);
+    iterate_samples(installer);
   }
 }
 
@@ -417,137 +286,119 @@
 void ObjectSampleCheckpoint::on_rotation(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
   assert(sampler != NULL, "invariant");
   assert(LeakProfiler::is_running(), "invariant");
-  ObjectSample* const last_resolved = const_cast<ObjectSample*>(sampler->last_resolved());
-  if (last_resolved != NULL) {
-    allocate_traceid_working_sets();
-    tag_old_stack_traces(last_resolved, stack_trace_repo);
-  }
-  resolve_stack_traces(sampler, stack_trace_repo, last_resolved);
+  resolve_stack_traces(sampler, stack_trace_repo);
+}
+
+static traceid get_klass_id(traceid method_id) {
+  assert(method_id != 0, "invariant");
+  return method_id >> TRACE_ID_SHIFT;
+}
+
+static bool is_klass_unloaded(traceid method_id) {
+  return unloaded_klass_set != NULL && predicate(unloaded_klass_set, get_klass_id(method_id));
+}
+
+static bool is_processed(traceid id) {
+  assert(id != 0, "invariant");
+  assert(id_set != NULL, "invariant");
+  return mutable_predicate(id_set, id);
 }
 
-static void reset_blob_write_state(const ObjectSample* sample) {
-  assert(sample != NULL, "invariant");
-  if (sample->has_thread_checkpoint()) {
-    sample->thread_checkpoint()->reset_write_state();
+void ObjectSampleCheckpoint::tag(const JfrStackFrame& frame, traceid method_id) {
+  if (is_processed(method_id) || is_klass_unloaded(method_id)) {
+    return;
   }
-  if (sample->has_klass_checkpoint()) {
-    sample->klass_checkpoint()->reset_write_state();
+  assert(frame._method != NULL, "invariant");
+  JfrTraceId::set_leakp(frame._method->method_holder(), frame._method);
+}
+
+void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
+  assert(trace != NULL, "invariant");
+  // JfrStackTrace
+  writer.write(trace->id());
+  writer.write((u1)!trace->_reached_root);
+  writer.write(trace->_nr_of_frames);
+  traceid last_id = 0;
+  // JfrStackFrames
+  for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
+    trace->_frames[i].write(writer);
+    const traceid method_id = trace->_frames[i]._methodid;
+    if (method_id != last_id) {
+      tag(trace->_frames[i], method_id);
+      last_id = method_id;
+    }
   }
 }
 
-static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
-  if (sample->has_thread_checkpoint() && has_thread_exited(sample->thread_id())) {
-    sample->thread_checkpoint()->exclusive_write(writer);
+static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) {
+  if (reset) {
+    blob->reset_write_state();
+    return;
+  }
+  blob->exclusive_write(writer);
+}
+
+static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  if (sample->has_type_set()) {
+    write_blob(sample->type_set(), writer, reset);
   }
 }
 
-static void write_klass_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
-  if (sample->has_klass_checkpoint()) {
-    sample->klass_checkpoint()->exclusive_write(writer);
+static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  assert(sample->has_thread(), "invariant");
+  if (has_thread_exited(sample->thread_id())) {
+    write_blob(sample->thread(), writer, reset);
   }
 }
 
-static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer) {
-  assert(sample != NULL, "invariant");
-  write_thread_blob(sample, writer);
-  write_klass_blob(sample, writer);
+static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  if (sample->has_stacktrace()) {
+    write_blob(sample->stacktrace(), writer, reset);
+  }
 }
 
-class CheckpointBlobWriter {
+static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  assert(sample != NULL, "invariant");
+  write_stacktrace_blob(sample, writer, reset);
+  write_thread_blob(sample, writer, reset);
+  write_type_set_blob(sample, writer, reset);
+}
+
+class BlobWriter {
  private:
   const ObjectSampler* _sampler;
   JfrCheckpointWriter& _writer;
   const jlong _last_sweep;
+  bool _reset;
  public:
-  CheckpointBlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
-    _sampler(sampler), _writer(writer), _last_sweep(last_sweep) {}
+  BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
+    _sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false)  {}
   void sample_do(ObjectSample* sample) {
     if (sample->is_alive_and_older_than(_last_sweep)) {
-      write_blobs(sample, _writer);
+      write_blobs(sample, _writer, _reset);
     }
   }
-};
-
-class CheckpointBlobStateReset {
- private:
-  const ObjectSampler* _sampler;
-  const jlong _last_sweep;
- public:
-  CheckpointBlobStateReset(const ObjectSampler* sampler, jlong last_sweep) : _sampler(sampler), _last_sweep(last_sweep) {}
-  void sample_do(ObjectSample* sample) {
-    if (sample->is_alive_and_older_than(_last_sweep)) {
-      reset_blob_write_state(sample);
-    }
+  void set_reset() {
+    _reset = true;
   }
 };
 
-static void reset_write_state_for_blobs(const ObjectSampler* sampler, jlong last_sweep) {
-  CheckpointBlobStateReset state_reset(sampler, last_sweep);
-  iterate_samples(state_reset, true);
-}
-
-static void write_sample_blobs(const ObjectSampler* sampler, jlong last_sweep, Thread* thread) {
+static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
+  // sample set is predicated on time of last sweep
+  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
   JfrCheckpointWriter writer(thread, false);
-  CheckpointBlobWriter cbw(sampler, writer, last_sweep);
+  BlobWriter cbw(sampler, writer, last_sweep);
   iterate_samples(cbw, true);
-  reset_write_state_for_blobs(sampler, last_sweep);
-}
-
-class StackTraceWriter {
- private:
-  JfrStackTraceRepository& _stack_trace_repo;
-  JfrCheckpointWriter& _writer;
-  const jlong _last_sweep;
-  int _count;
- public:
-  StackTraceWriter(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer, jlong last_sweep) :
-    _stack_trace_repo(stack_trace_repo), _writer(writer), _last_sweep(last_sweep), _count(0) {}
-  void sample_do(ObjectSample* sample) {
-    if (sample->is_alive_and_older_than(_last_sweep)) {
-      if (stack_trace_precondition(sample)) {
-        assert(sample->stack_trace_id() == sample->stack_trace()->id(), "invariant");
-        if (ObjectSampleCheckpoint::tag(sample->stack_trace(), &_writer)) {
-          ++_count;
-        }
-      }
-    }
-  }
-  int count() const {
-    return _count;
-  }
-};
-
-static void write_stack_traces(ObjectSampler* sampler, JfrStackTraceRepository& repo, jlong last_sweep, Thread* thread) {
-  assert(sampler != NULL, "invariant");
-  ObjectSample* const last_resolved = const_cast<ObjectSample*>(sampler->last_resolved());
-  if (last_resolved == NULL) {
-    // no old traces
-    return;
-  }
-  JfrCheckpointWriter writer(thread);
-  const JfrCheckpointContext ctx = writer.context();
-  writer.write_type(TYPE_STACKTRACE);
-  const jlong count_offset = writer.reserve(sizeof(u4));
-  allocate_traceid_working_sets();
-  StackTraceWriter sw(repo, writer, last_sweep);
-  do_samples(last_resolved, NULL, sw);
-  if (sw.count() == 0) {
-    writer.set_context(ctx);
-    return;
-  }
-  writer.write_count((u4)sw.count(), count_offset);
-  written = true;
+  // reset blob write states
+  cbw.set_reset();
+  iterate_samples(cbw, true);
 }
 
 void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
   assert(sampler != NULL, "invariant");
   assert(edge_store != NULL, "invariant");
   assert(thread != NULL, "invariant");
-  // sample set is predicated on time of last sweep
-  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
-  JfrStackTraceRepository& repo = JfrStackTraceRepository::instance();
-  write_stack_traces(sampler, repo, last_sweep, thread);
-  write_sample_blobs(sampler, last_sweep, thread);
+  write_sample_blobs(sampler, emit_all, thread);
   // write reference chains
   if (!edge_store->is_empty()) {
     JfrCheckpointWriter writer(thread);
@@ -555,3 +406,48 @@
     edge_store->iterate(osw);
   }
 }
+
+class BlobInstaller {
+ private:
+  const JfrBlobHandle& _blob;
+ public:
+  BlobInstaller(const JfrBlobHandle& blob) : _blob(blob) {}
+  void sample_do(ObjectSample* sample) {
+    if (!sample->is_dead()) {
+      sample->set_type_set(_blob);
+    }
+  }
+};
+
+static void clear_unloaded_klass_set() {
+  if (unloaded_klass_set != NULL && unloaded_klass_set->is_nonempty()) {
+    unloaded_klass_set->clear();
+  }
+}
+
+static void install_blob(JfrCheckpointWriter& writer, bool copy = false) {
+  assert(writer.has_data(), "invariant");
+  const JfrBlobHandle blob = copy ? writer.copy() : writer.move();
+  BlobInstaller installer(blob);
+  iterate_samples(installer);
+}
+
+void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
+  assert(LeakProfiler::is_running(), "invariant");
+  const ObjectSample* last = ObjectSampler::sampler()->last();
+  if (writer.has_data() && last != NULL) {
+    install_blob(writer);
+    ObjectSampler::sampler()->set_last_resolved(last);
+  }
+  // Only happens post chunk rotation and we would not have hit another class unload safepoint.
+  // Therefore it is safe to release the set of unloaded classes tracked during the previous epoch.
+  clear_unloaded_klass_set();
+}
+
+void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
+    install_blob(writer, true);
+  }
+}
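
The StackTraceBlobInstaller added above serializes each referenced stack trace into a checkpoint blob once and shares that blob among samples with the same trace, via BlobCache: entries are keyed on the 32-bit stack trace hash and disambiguated by the stack trace id recorded in the entry (_lookup_id drives link() and equals()). Below is a rough sketch of that lookup policy using a standard container and hypothetical names; the real cache sits on the JfrHashtable host shown in the diff.

    // Sketch of the BlobCache lookup policy: key on the stack trace hash,
    // resolve hash collisions by comparing the stored stack trace id.
    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <unordered_map>
    #include <utility>

    using traceid = std::uint64_t;
    struct Blob {};                             // placeholder for the blob payload
    using BlobHandle = std::shared_ptr<Blob>;

    class StackTraceBlobCache {
      // one hash bucket may hold several (id, blob) pairs
      std::unordered_multimap<unsigned int, std::pair<traceid, BlobHandle> > _table;
     public:
      BlobHandle get(unsigned int hash, traceid id) const {
        auto range = _table.equal_range(hash);
        for (auto it = range.first; it != range.second; ++it) {
          if (it->second.first == id) {         // equals(): same hash and same id
            return it->second.second;
          }
        }
        return BlobHandle();                    // invalid handle -> caller serializes
      }
      void put(unsigned int hash, traceid id, const BlobHandle& blob) {
        _table.emplace(hash, std::make_pair(id, blob));
      }
    };

    int main() {
      StackTraceBlobCache cache;
      cache.put(0x2a, 17, std::make_shared<Blob>());   // first sample with this trace
      const BlobHandle hit = cache.get(0x2a, 17);      // later samples reuse the blob
      const BlobHandle miss = cache.get(0x2a, 99);     // same hash, different trace id
      std::printf("hit: %s, miss: %s\n", hit ? "yes" : "no", miss ? "yes" : "no");
      return 0;
    }

StackTraceBlobInstaller::install() follows the same flow: a cache hit installs the existing handle on the sample, a miss resolves the JfrStackTrace from the repository, writes a TYPE_STACKTRACE checkpoint, moves the writer contents into a blob, caches it and installs it on the sample.
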
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -31,6 +31,7 @@
 class Klass;
 class JavaThread;
 class JfrCheckpointWriter;
+class JfrStackFrame;
 class JfrStackTrace;
 class JfrStackTraceRepository;
 class ObjectSample;
@@ -39,14 +40,20 @@
 class Thread;
 
 class ObjectSampleCheckpoint : AllStatic {
+  friend class EventEmitter;
+  friend class PathToGcRootsOperation;
+  friend class StackTraceBlobInstaller;
+ private:
+  static int save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
+  static void tag(const JfrStackFrame& frame, traceid method_id);
+  static void write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer);
+  static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
  public:
   static void on_klass_unload(const Klass* k);
+  static void on_type_set(JfrCheckpointWriter& writer);
   static void on_type_set_unload(JfrCheckpointWriter& writer);
   static void on_thread_exit(JavaThread* jt);
   static void on_rotation(ObjectSampler* sampler, JfrStackTraceRepository& repo);
-  static bool tag(const JfrStackTrace* trace, JfrCheckpointWriter* writer = NULL);
-  static int save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
-  static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -137,26 +137,24 @@
             typename,
             size_t>
   friend class HashTableHost;
-  typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, Entry, FieldTable, 109> FieldInfoTable;
+  typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, JfrHashtableEntry, FieldTable, 109> FieldInfoTable;
  public:
   typedef FieldInfoTable::HashEntry FieldInfoEntry;
 
  private:
   static traceid _field_id_counter;
   FieldInfoTable* _table;
+  const ObjectSampleFieldInfo* _lookup;
 
-  void assign_id(FieldInfoEntry* entry) {
+  void link(FieldInfoEntry* entry) {
     assert(entry != NULL, "invariant");
     entry->set_id(++_field_id_counter);
   }
 
-  bool equals(const ObjectSampleFieldInfo* query, uintptr_t hash, const FieldInfoEntry* entry) {
+  bool equals(uintptr_t hash, const FieldInfoEntry* entry) {
     assert(hash == entry->hash(), "invariant");
-    assert(query != NULL, "invariant");
-    const ObjectSampleFieldInfo* stored = entry->literal();
-    assert(stored != NULL, "invariant");
-    assert(stored->_field_name_symbol->identity_hash() == query->_field_name_symbol->identity_hash(), "invariant");
-    return stored->_field_modifiers == query->_field_modifiers;
+    assert(_lookup != NULL, "invariant");
+    return entry->literal()->_field_modifiers == _lookup->_field_modifiers;
   }
 
   void unlink(FieldInfoEntry* entry) {
@@ -165,7 +163,7 @@
   }
 
  public:
-  FieldTable() : _table(new FieldInfoTable(this)) {}
+  FieldTable() : _table(new FieldInfoTable(this)), _lookup(NULL) {}
   ~FieldTable() {
     assert(_table != NULL, "invariant");
     delete _table;
@@ -173,8 +171,8 @@
 
   traceid store(const ObjectSampleFieldInfo* field_info) {
     assert(field_info != NULL, "invariant");
-    const FieldInfoEntry& entry =_table->lookup_put(field_info,
-                                                    field_info->_field_name_symbol->identity_hash());
+    _lookup = field_info;
+    const FieldInfoEntry& entry = _table->lookup_put(field_info->_field_name_symbol->identity_hash(), field_info);
     return entry.id();
   }
 
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -25,9 +25,8 @@
 #ifndef SHARE_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
 #define SHARE_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP
 
-#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
-#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
 #include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "memory/allocation.hpp"
@@ -41,17 +40,14 @@
  * allocated, the thread and the stack trace.
  */
 class ObjectSample : public JfrCHeapObj {
-  friend class CheckpointInstall;
-  friend class ObjectResolver;
-  friend class ObjectSampleCheckpoint;
   friend class ObjectSampler;
   friend class SampleList;
  private:
   ObjectSample* _next;
   ObjectSample* _previous;
-  mutable const JfrStackTrace* _stack_trace;
-  JfrCheckpointBlobHandle _thread_cp;
-  JfrCheckpointBlobHandle _klass_cp;
+  JfrBlobHandle _stacktrace;
+  JfrBlobHandle _thread;
+  JfrBlobHandle _type_set;
   oop _object;
   Ticks _allocation_time;
   traceid _stack_trace_id;
@@ -68,12 +64,9 @@
   }
 
   void release_references() {
-    if (_thread_cp.valid()) {
-      _thread_cp.~JfrCheckpointBlobHandle();
-    }
-    if (_klass_cp.valid()) {
-      _klass_cp.~JfrCheckpointBlobHandle();
-    }
+    _stacktrace.~JfrBlobHandle();
+    _thread.~JfrBlobHandle();
+    _type_set.~JfrBlobHandle();
   }
 
   void reset() {
@@ -83,18 +76,12 @@
     _dead = false;
   }
 
-  ~ObjectSample() {
-    if (_stack_trace != NULL) {
-      delete _stack_trace;
-    }
-  }
-
  public:
   ObjectSample() : _next(NULL),
                    _previous(NULL),
-                   _stack_trace(NULL),
-                   _thread_cp(),
-                   _klass_cp(),
+                   _stacktrace(),
+                   _thread(),
+                   _type_set(),
                    _object(NULL),
                    _allocation_time(),
                    _stack_trace_id(0),
@@ -207,18 +194,6 @@
     _stack_trace_hash = hash;
   }
 
-  const JfrStackTrace* stack_trace() const {
-    return _stack_trace;
-  }
-
-  void set_stack_trace(const JfrStackTrace* trace) const {
-    _stack_trace = trace;
-  }
-
-  bool has_thread() const {
-    return _thread_id != 0;
-  }
-
   traceid thread_id() const {
     return _thread_id;
   }
@@ -232,37 +207,51 @@
       _allocation_time.ft_value() : _allocation_time.value()) < time_stamp;
   }
 
-  const JfrCheckpointBlobHandle& thread_checkpoint() const {
-    return _thread_cp;
+  const JfrBlobHandle& stacktrace() const {
+    return _stacktrace;
   }
 
-  bool has_thread_checkpoint() const {
-    return _thread_cp.valid();
+  bool has_stacktrace() const {
+    return _stacktrace.valid();
   }
 
-  // JfrCheckpointBlobHandle assignment operator
+  // JfrBlobHandle assignment operator
   // maintains proper reference counting
-  void set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) {
-    if (_thread_cp != ref) {
-      _thread_cp = ref;
+  void set_stacktrace(const JfrBlobHandle& ref) {
+    if (_stacktrace != ref) {
+      _stacktrace = ref;
     }
   }
 
-  const JfrCheckpointBlobHandle& klass_checkpoint() const {
-    return _klass_cp;
+  const JfrBlobHandle& thread() const {
+    return _thread;
   }
 
-  bool has_klass_checkpoint() const {
-    return _klass_cp.valid();
+  bool has_thread() const {
+    return _thread.valid();
+  }
+
+  void set_thread(const JfrBlobHandle& ref) {
+    if (_thread != ref) {
+      _thread = ref;
+    }
   }
 
-  void set_klass_checkpoint(const JfrCheckpointBlobHandle& ref) {
-    if (_klass_cp != ref) {
-      if (_klass_cp.valid()) {
-        _klass_cp->set_next(ref);
+  const JfrBlobHandle& type_set() const {
+    return _type_set;
+  }
+
+  bool has_type_set() const {
+    return _type_set.valid();
+  }
+
+  void set_type_set(const JfrBlobHandle& ref) {
+    if (_type_set != ref) {
+      if (_type_set.valid()) {
+        _type_set->set_next(ref);
         return;
       }
-      _klass_cp = ref;
+      _type_set = ref;
     }
   }
 };
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -113,10 +113,10 @@
   if (tl->is_excluded()) {
     return 0;
   }
-  if (!tl->has_thread_checkpoint()) {
-    JfrCheckpointManager::create_thread_checkpoint(thread);
+  if (!tl->has_thread_blob()) {
+    JfrCheckpointManager::create_thread_blob(thread);
   }
-  assert(tl->has_thread_checkpoint(), "invariant");
+  assert(tl->has_thread_blob(), "invariant");
   return tl->thread_id();
 }
 
@@ -148,7 +148,7 @@
   assert(obj != NULL, "invariant");
   assert(thread_id != 0, "invariant");
   assert(thread != NULL, "invariant");
-  assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+  assert(thread->jfr_thread_local()->has_thread_blob(), "invariant");
 
   if (_dead_samples) {
     scavenge();
@@ -174,7 +174,7 @@
   sample->set_thread_id(thread_id);
 
   const JfrThreadLocal* const tl = thread->jfr_thread_local();
-  sample->set_thread_checkpoint(tl->thread_checkpoint());
+  sample->set_thread(tl->thread_blob());
 
   const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
   if (stacktrace_hash != 0) {
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -75,7 +75,6 @@
 
  public:
   static ObjectSampler* sampler();
-
   // For operations that require exclusive access (non-safepoint)
   static ObjectSampler* acquire();
   static void release();
--- a/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -50,12 +50,12 @@
   SampleList(size_t limit, size_t cache_size = 0);
   ~SampleList();
 
-  void set_last_resolved(const ObjectSample* sample);
   ObjectSample* get();
+  ObjectSample* first() const;
   ObjectSample* last() const;
-  ObjectSample* first() const;
+  const ObjectSample* last_resolved() const;
+  void set_last_resolved(const ObjectSample* sample);
   void release(ObjectSample* sample);
-  const ObjectSample* last_resolved() const;
   ObjectSample* reuse(ObjectSample* sample);
   bool is_full() const;
   size_t count() const;
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-
-JfrCheckpointBlob::JfrCheckpointBlob(const u1* checkpoint, size_t size) :
-  _checkpoint(JfrCHeapObj::new_array<u1>(size)),
-  _size(size),
-  _next(),
-  _written(false) {
-  assert(checkpoint != NULL, "invariant");
-  assert(_checkpoint != NULL, "invariant");
-  memcpy(const_cast<u1*>(_checkpoint), checkpoint, size);
-}
-
-JfrCheckpointBlob::~JfrCheckpointBlob() {
-  JfrCHeapObj::free(const_cast<u1*>(_checkpoint), _size);
-}
-
-const JfrCheckpointBlobHandle& JfrCheckpointBlob::next() const {
-  return _next;
-}
-
-void JfrCheckpointBlob::write_this(JfrCheckpointWriter& writer) const {
-  writer.bytes(_checkpoint, _size);
-}
-
-void JfrCheckpointBlob::exclusive_write(JfrCheckpointWriter& writer) const {
-  if (!_written) {
-    write_this(writer);
-    _written = true;
-  }
-  if (_next.valid()) {
-    _next->exclusive_write(writer);
-  }
-}
-
-void JfrCheckpointBlob::write(JfrCheckpointWriter& writer) const {
-  write_this(writer);
-  if (_next.valid()) {
-    _next->write(writer);
-  }
-}
-
-void JfrCheckpointBlob::reset_write_state() const {
-  if (_written) {
-    _written = false;
-  }
-  if (_next.valid()) {
-    _next->reset_write_state();
-  }
-}
-
-void JfrCheckpointBlob::set_next(const JfrCheckpointBlobHandle& ref) {
-  if (_next == ref) {
-    return;
-  }
-  assert(_next != ref, "invariant");
-  if (_next.valid()) {
-    _next->set_next(ref);
-    return;
-  }
-  _next = ref;
-}
-
-JfrCheckpointBlobHandle JfrCheckpointBlob::make(const u1* checkpoint, size_t size) {
-  const JfrCheckpointBlob* cp_blob = new JfrCheckpointBlob(checkpoint, size);
-  assert(cp_blob != NULL, "invariant");
-  return JfrCheckpointBlobReference::make(cp_blob);
-}
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
-#define SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
-
-#include "jfr/utilities/jfrAllocation.hpp"
-#include "jfr/utilities/jfrRefCountPointer.hpp"
-
-class JfrCheckpointBlob;
-class JfrCheckpointWriter;
-
-typedef RefCountPointer<JfrCheckpointBlob, MultiThreadedRefCounter> JfrCheckpointBlobReference;
-typedef RefCountHandle<JfrCheckpointBlobReference> JfrCheckpointBlobHandle;
-
-class JfrCheckpointBlob : public JfrCHeapObj {
-  template <typename, typename>
-  friend class RefCountPointer;
- private:
-  const u1* _checkpoint;
-  const size_t _size;
-  JfrCheckpointBlobHandle _next;
-  mutable bool _written;
-
-  JfrCheckpointBlob(const u1* checkpoint, size_t size);
-  ~JfrCheckpointBlob();
-  const JfrCheckpointBlobHandle& next() const;
-  void write_this(JfrCheckpointWriter& writer) const;
-
- public:
-  void write(JfrCheckpointWriter& writer) const;
-  void exclusive_write(JfrCheckpointWriter& writer) const;
-  void reset_write_state() const;
-  void set_next(const JfrCheckpointBlobHandle& ref);
-  static JfrCheckpointBlobHandle make(const u1* checkpoint, size_t size);
-};
-
-#endif // SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -254,12 +254,12 @@
   return read_data<juint>(data + types_offset);
 }
 
-static void write_checkpoint_header(JfrChunkWriter& cw, int64_t offset_prev_cp_event, const u1* data) {
+static void write_checkpoint_header(JfrChunkWriter& cw, int64_t delta_to_last_checkpoint, const u1* data) {
   cw.reserve(sizeof(u4));
   cw.write<u8>(EVENT_CHECKPOINT);
   cw.write(starttime(data));
   cw.write(duration(data));
-  cw.write(offset_prev_cp_event);
+  cw.write(delta_to_last_checkpoint);
   cw.write(checkpoint_type(data));
   cw.write(number_of_types(data));
 }
@@ -273,9 +273,9 @@
   assert(data != NULL, "invariant");
   const int64_t event_begin = cw.current_offset();
   const int64_t last_checkpoint_event = cw.last_checkpoint_offset();
-  const int64_t delta = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin;
+  const int64_t delta_to_last_checkpoint = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin;
   const int64_t checkpoint_size = total_size(data);
-  write_checkpoint_header(cw, delta, data);
+  write_checkpoint_header(cw, delta_to_last_checkpoint, data);
   write_checkpoint_content(cw, data, checkpoint_size);
   const int64_t event_size = cw.current_offset() - event_begin;
   cw.write_padded_at_offset<u4>(event_size, event_begin);
@@ -440,7 +440,7 @@
 }
 
 bool JfrCheckpointManager::is_type_set_required() {
-  return JfrTraceIdEpoch::is_klass_tagged_in_epoch();
+  return JfrTraceIdEpoch::has_changed_tag_state();
 }
 
 bool JfrCheckpointManager::is_constant_set_required() {
@@ -457,8 +457,8 @@
   flush();
 }
 
-void JfrCheckpointManager::create_thread_checkpoint(Thread* t) {
-  JfrTypeManager::create_thread_checkpoint(t);
+void JfrCheckpointManager::create_thread_blob(Thread* t) {
+  JfrTypeManager::create_thread_blob(t);
 }
 
 void JfrCheckpointManager::write_thread_checkpoint(Thread* t) {
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -104,7 +104,7 @@
   size_t flush_type_set();
   void flush_constant_set();
   static void write_type_set_for_unloaded_classes();
-  static void create_thread_checkpoint(Thread* t);
+  static void create_thread_blob(Thread* t);
   static void write_thread_checkpoint(Thread* t);
 
   friend class JfrRecorder;
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
 #include "jfr/writers/jfrBigEndianWriter.hpp"
 
 JfrCheckpointFlush::JfrCheckpointFlush(Type* old, size_t used, size_t requested, Thread* t) :
@@ -101,7 +102,7 @@
   assert(this->used_size() > sizeof(JfrCheckpointEntry), "invariant");
   const int64_t size = this->current_offset();
   assert(size + this->start_pos() == this->current_pos(), "invariant");
-  write_checkpoint_header(const_cast<u1*>(this->start_pos()), size, _time, _type, count());
+  write_checkpoint_header(const_cast<u1*>(this->start_pos()), size, _time, (u4)_type, count());
   release();
 }
 
@@ -159,7 +160,7 @@
   }
   *size = this->used_size();
   assert(this->start_pos() + *size == this->current_pos(), "invariant");
-  write_checkpoint_header(const_cast<u1*>(this->start_pos()), this->used_offset(), _time, _type, count());
+  write_checkpoint_header(const_cast<u1*>(this->start_pos()), this->used_offset(), _time, (u4)_type, count());
   _header = false; // the header is already written
   if (move) {
     this->seek(_offset);
@@ -182,16 +183,16 @@
   return this->used_size() > sizeof(JfrCheckpointEntry);
 }
 
-JfrCheckpointBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
+JfrBlobHandle JfrCheckpointWriter::copy(const JfrCheckpointContext* ctx /* 0 */) {
   size_t size = 0;
   const u1* data = session_data(&size, false, ctx);
-  return JfrCheckpointBlob::make(data, size);
+  return JfrBlob::make(data, size);
 }
 
-JfrCheckpointBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) {
+JfrBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) {
   size_t size = 0;
   const u1* data = session_data(&size, true, ctx);
-  JfrCheckpointBlobHandle blob = JfrCheckpointBlob::make(data, size);
+  JfrBlobHandle blob = JfrBlob::make(data, size);
   if (ctx != NULL) {
     const_cast<JfrCheckpointContext*>(ctx)->count = 0;
     set_context(*ctx);
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -25,8 +25,8 @@
 #ifndef SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
 #define SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
 
-#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
 #include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "jfr/writers/jfrEventWriterHost.inline.hpp"
@@ -80,8 +80,8 @@
   const JfrCheckpointContext context() const;
   void set_context(const JfrCheckpointContext ctx);
   bool has_data() const;
-  JfrCheckpointBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
-  JfrCheckpointBlobHandle move(const JfrCheckpointContext* ctx = NULL);
+  JfrBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
+  JfrBlobHandle move(const JfrCheckpointContext* ctx = NULL);
 };
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -30,7 +30,6 @@
 #include "gc/shared/gcName.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcWhen.hpp"
-#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/types/jfrType.hpp"
@@ -293,15 +292,17 @@
 
 class TypeSetSerialization {
  private:
+  JfrCheckpointWriter* _leakp_writer;
   size_t _elements;
   bool _class_unload;
   bool _flushpoint;
  public:
-  explicit TypeSetSerialization(bool class_unload, bool flushpoint) : _elements(0), _class_unload(class_unload), _flushpoint(flushpoint) {}
+  TypeSetSerialization(bool class_unload, bool flushpoint, JfrCheckpointWriter* leakp_writer = NULL) :
+    _leakp_writer(leakp_writer), _elements(0), _class_unload(class_unload), _flushpoint(flushpoint) {}
   void write(JfrCheckpointWriter& writer) {
     MutexLocker cld_lock(SafepointSynchronize::is_at_safepoint() ? NULL : ClassLoaderDataGraph_lock);
     MutexLocker lock(SafepointSynchronize::is_at_safepoint() ? NULL : Module_lock);
-    _elements = JfrTypeSet::serialize(&writer, _class_unload, _flushpoint);
+   _elements = JfrTypeSet::serialize(&writer, _leakp_writer, _class_unload, _flushpoint);
   }
   size_t elements() const {
     return _elements;
@@ -311,10 +312,6 @@
 void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
   TypeSetSerialization type_set(true, false);
   type_set.write(writer);
-  if (LeakProfiler::is_running()) {
-    ObjectSampleCheckpoint::on_type_set_unload(writer);
-    return;
-  }
 };
 
 void FlushTypeSet::serialize(JfrCheckpointWriter& writer) {
@@ -328,8 +325,10 @@
   return _elements;
 }
 
+TypeSet::TypeSet(JfrCheckpointWriter* leakp_writer) : _leakp_writer(leakp_writer) {}
+
 void TypeSet::serialize(JfrCheckpointWriter& writer) {
-  TypeSetSerialization type_set(false, false);
+  TypeSetSerialization type_set(false, false, _leakp_writer);
   type_set.write(writer);
 };
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -124,7 +124,10 @@
 };
 
 class TypeSet : public JfrSerializer {
+ private:
+  JfrCheckpointWriter* _leakp_writer;
  public:
+  explicit TypeSet(JfrCheckpointWriter* leakp_writer = NULL);
   void serialize(JfrCheckpointWriter& writer);
 };
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 #include "jfr/jfr.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/metadata/jfrSerializer.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
 #include "jfr/recorder/checkpoint/types/jfrType.hpp"
@@ -42,7 +44,7 @@
   JfrSerializerRegistration* _next;
   JfrSerializerRegistration* _prev;
   JfrSerializer* _serializer;
-  mutable JfrCheckpointBlobHandle _cache;
+  mutable JfrBlobHandle _cache;
   JfrTypeId _id;
   bool _permit_cache;
 
@@ -171,9 +173,17 @@
 
 void JfrTypeManager::write_type_set() {
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (!LeakProfiler::is_running()) {
+    JfrCheckpointWriter writer;
+    TypeSet set;
+    set.serialize(writer);
+    return;
+  }
+  JfrCheckpointWriter leakp_writer;
   JfrCheckpointWriter writer;
-  TypeSet set;
+  TypeSet set(&leakp_writer);
   set.serialize(writer);
+  ObjectSampleCheckpoint::on_type_set(leakp_writer);
 }
 
 void JfrTypeManager::write_type_set_for_unloaded_classes() {
@@ -182,6 +192,9 @@
   const JfrCheckpointContext ctx = writer.context();
   ClassUnloadTypeSet class_unload_set;
   class_unload_set.serialize(writer);
+  if (LeakProfiler::is_running()) {
+    ObjectSampleCheckpoint::on_type_set_unload(writer);
+  }
   if (!Jfr::is_recording()) {
     // discard anything written
     writer.set_context(ctx);
@@ -196,7 +209,7 @@
   return flush.elements();
 }
 
-void JfrTypeManager::create_thread_checkpoint(Thread* t) {
+void JfrTypeManager::create_thread_blob(Thread* t) {
   assert(t != NULL, "invariant");
   ResourceMark rm(t);
   HandleMark hm(t);
@@ -205,8 +218,8 @@
   writer.write_type(TYPE_THREAD);
   type_thread.serialize(writer);
   // create and install a checkpoint blob
-  t->jfr_thread_local()->set_thread_checkpoint(writer.move());
-  assert(t->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+  t->jfr_thread_local()->set_thread_blob(writer.move());
+  assert(t->jfr_thread_local()->has_thread_blob(), "invariant");
 }
 
 void JfrTypeManager::write_thread_checkpoint(Thread* t) {
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -40,7 +40,7 @@
   static void write_type_set();
   static void write_type_set_for_unloaded_classes();
   static size_t flush_type_set();
-  static void create_thread_checkpoint(Thread* t);
+  static void create_thread_blob(Thread* t);
   static void write_thread_checkpoint(Thread* t);
 };
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -65,6 +65,7 @@
 }
 
 static JfrCheckpointWriter* _writer = NULL;
+static JfrCheckpointWriter* _leakp_writer = NULL;
 static bool _class_unload = false;
 static bool _flushpoint = false;
 static JfrArtifactSet* _artifacts = NULL;
@@ -82,12 +83,12 @@
   return !_artifacts->has_klass_entries() && current_epoch();
 }
 
-static traceid mark_symbol(KlassPtr klass) {
-  return klass != NULL ? create_symbol_id(_artifacts->mark(klass)) : 0;
+static traceid mark_symbol(KlassPtr klass, bool leakp) {
+  return klass != NULL ? create_symbol_id(_artifacts->mark(klass, leakp)) : 0;
 }
 
-static traceid mark_symbol(Symbol* symbol) {
-  return symbol != NULL ? create_symbol_id(_artifacts->mark(symbol)) : 0;
+static traceid mark_symbol(Symbol* symbol, bool leakp) {
+  return symbol != NULL ? create_symbol_id(_artifacts->mark(symbol, leakp)) : 0;
 }
 
 template <typename T>
@@ -102,11 +103,15 @@
   return pkg_entry != NULL ? artifact_id(pkg_entry) : 0;
 }
 
-static traceid module_id(PkgPtr pkg) {
+static traceid module_id(PkgPtr pkg, bool leakp) {
   assert(pkg != NULL, "invariant");
   ModPtr module_entry = pkg->module();
   if (module_entry != NULL && module_entry->is_named()) {
-    SET_TRANSIENT(module_entry);
+    if (leakp) {
+      SET_LEAKP(module_entry);
+    } else {
+      SET_TRANSIENT(module_entry);
+    }
     return artifact_id(module_entry);
   }
   return 0;
@@ -118,12 +123,16 @@
   return METHOD_ID(klass, method);
 }
 
-static traceid cld_id(CldPtr cld) {
+static traceid cld_id(CldPtr cld, bool leakp) {
   assert(cld != NULL, "invariant");
   if (cld->is_unsafe_anonymous()) {
     return 0;
   }
-  SET_TRANSIENT(cld);
+  if (leakp) {
+    SET_LEAKP(cld);
+  } else {
+    SET_TRANSIENT(cld);
+  }
   return artifact_id(cld);
 }
 
@@ -140,6 +149,42 @@
   assert(IS_SERIALIZED(ptr), "invariant");
 }
 
+template <typename T>
+void tag_leakp_artifact(T const& value) {
+  assert(value != NULL, "invariant");
+  SET_LEAKP(value);
+  assert(IS_LEAKP(value), "invariant");
+}
+
+static void tag_leakp_klass_artifacts(KlassPtr k, bool class_unload) {
+  assert(k != NULL, "invariant");
+  PkgPtr pkg = k->package();
+  if (pkg != NULL) {
+    tag_leakp_artifact(pkg);
+    ModPtr module = pkg->module();
+    if (module != NULL) {
+      tag_leakp_artifact(module);
+    }
+  }
+  CldPtr cld = k->class_loader_data();
+  assert(cld != NULL, "invariant");
+  if (!cld->is_unsafe_anonymous()) {
+    tag_leakp_artifact(cld);
+  }
+}
+
+class TagLeakpKlassArtifact {
+  bool _class_unload;
+public:
+  TagLeakpKlassArtifact(bool class_unload) : _class_unload(class_unload) {}
+  bool operator()(KlassPtr klass) {
+    if (IS_LEAKP(klass)) {
+      tag_leakp_klass_artifacts(klass, _class_unload);
+    }
+    return true;
+  }
+};
+
 /*
  * In C++03, functions used as template parameters must have external linkage;
  * this restriction was removed in C++11. Change back to "static" and
@@ -148,11 +193,10 @@
  * The weird naming is an effort to decrease the risk of name clashes.
  */
 
-int write__klass(JfrCheckpointWriter* writer, const void* k) {
+static int write_klass(JfrCheckpointWriter* writer, KlassPtr klass, bool leakp) {
   assert(writer != NULL, "invariant");
   assert(_artifacts != NULL, "invariant");
-  assert(k != NULL, "invariant");
-  KlassPtr klass = (KlassPtr)k;
+  assert(klass != NULL, "invariant");
   traceid pkg_id = 0;
   KlassPtr theklass = klass;
   if (theklass->is_objArray_klass()) {
@@ -165,17 +209,32 @@
     assert(theklass->is_typeArray_klass(), "invariant");
   }
   writer->write(artifact_id(klass));
-  writer->write(cld_id(klass->class_loader_data()));
-  writer->write(mark_symbol(klass));
+  writer->write(cld_id(klass->class_loader_data(), leakp));
+  writer->write(mark_symbol(klass, leakp));
   writer->write(pkg_id);
   writer->write(get_flags(klass));
+  return 1;
+}
+
+int write__klass(JfrCheckpointWriter* writer, const void* k) {
+  assert(k != NULL, "invariant");
+  KlassPtr klass = (KlassPtr)k;
   set_serialized(klass);
-  return 1;
+  return write_klass(writer, klass, false);
+}
+
+int write__klass__leakp(JfrCheckpointWriter* writer, const void* k) {
+  assert(k != NULL, "invariant");
+  KlassPtr klass = (KlassPtr)k;
+  return write_klass(writer, klass, true);
 }
 
 static void do_implied(Klass* klass) {
   assert(klass != NULL, "invariant");
   if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
+    if (_leakp_writer != NULL) {
+      SET_LEAKP(klass);
+    }
     _subsystem_callback->do_artifact(klass);
   }
 }
@@ -225,15 +284,35 @@
 typedef CompositeFunctor<KlassPtr, KlassWriter, KlassArtifactRegistrator> KlassWriterRegistration;
 typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
 
+typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
+typedef JfrPredicatedTypeWriterImplHost<KlassPtr, LeakKlassPredicate, write__klass__leakp> LeakKlassWriterImpl;
+typedef JfrTypeWriterHost<LeakKlassWriterImpl, TYPE_CLASS> LeakKlassWriter;
+typedef CompositeFunctor<KlassPtr, TagLeakpKlassArtifact, LeakKlassWriter> LeakpKlassArtifactTagging;
+
+typedef CompositeFunctor<KlassPtr, LeakpKlassArtifactTagging, KlassWriter> CompositeKlassWriter;
+typedef CompositeFunctor<KlassPtr, CompositeKlassWriter, KlassArtifactRegistrator> CompositeKlassWriterRegistration;
+typedef JfrArtifactCallbackHost<KlassPtr, CompositeKlassWriterRegistration> CompositeKlassCallback;
+
 static bool write_klasses() {
   assert(!_artifacts->has_klass_entries(), "invariant");
   assert(_writer != NULL, "invariant");
   KlassArtifactRegistrator reg(_artifacts);
   KlassWriter kw(_writer, _class_unload);
   KlassWriterRegistration kwr(&kw, &reg);
-  KlassCallback callback(&kwr);
-  _subsystem_callback = &callback;
-  do_klasses();
+  if (_leakp_writer == NULL) {
+    KlassCallback callback(&kwr);
+    _subsystem_callback = &callback;
+    do_klasses();
+  } else {
+    TagLeakpKlassArtifact tagging(_class_unload);
+    LeakKlassWriter lkw(_leakp_writer, _artifacts, _class_unload);
+    LeakpKlassArtifactTagging lpkat(&tagging, &lkw);
+    CompositeKlassWriter ckw(&lpkat, &kw);
+    CompositeKlassWriterRegistration ckwr(&ckw, &reg);
+    CompositeKlassCallback callback(&ckwr);
+    _subsystem_callback = &callback;
+    do_klasses();
+  }
   if (is_complete()) {
     return false;
   }
@@ -256,17 +335,29 @@
   assert(IS_NOT_SERIALIZED(value), "invariant");
 }
 
-int write__package(JfrCheckpointWriter* writer, const void* p) {
+static int write_package(JfrCheckpointWriter* writer, PkgPtr pkg, bool leakp) {
   assert(writer != NULL, "invariant");
   assert(_artifacts != NULL, "invariant");
+  assert(pkg != NULL, "invariant");
+  writer->write(artifact_id(pkg));
+  writer->write(mark_symbol(pkg->name(), leakp));
+  writer->write(module_id(pkg, leakp));
+  writer->write((bool)pkg->is_exported());
+  return 1;
+}
+
+int write__package(JfrCheckpointWriter* writer, const void* p) {
   assert(p != NULL, "invariant");
   PkgPtr pkg = (PkgPtr)p;
-  writer->write(artifact_id(pkg));
-  writer->write(mark_symbol(pkg->name()));
-  writer->write(module_id(pkg));
-  writer->write((bool)pkg->is_exported());
   set_serialized(pkg);
-  return 1;
+  return write_package(writer, pkg, false);
+}
+
+int write__package__leakp(JfrCheckpointWriter* writer, const void* p) {
+  assert(p != NULL, "invariant");
+  PkgPtr pkg = (PkgPtr)p;
+  CLEAR_LEAKP(pkg);
+  return write_package(writer, pkg, true);
 }
 
 static void do_package(PackageEntry* entry) {
@@ -293,32 +384,70 @@
 typedef KlassToFieldEnvelope<PackageFieldSelector, PackageWriter> KlassPackageWriter;
 typedef JfrArtifactCallbackHost<PkgPtr, PackageWriterWithClear> PackageCallback;
 
+typedef LeakPredicate<PkgPtr> LeakPackagePredicate;
+typedef JfrPredicatedTypeWriterImplHost<PkgPtr, LeakPackagePredicate, write__package__leakp> LeakPackageWriterImpl;
+typedef JfrTypeWriterHost<LeakPackageWriterImpl, TYPE_PACKAGE> LeakPackageWriter;
+
+typedef CompositeFunctor<PkgPtr, LeakPackageWriter, PackageWriter> CompositePackageWriter;
+typedef KlassToFieldEnvelope<PackageFieldSelector, CompositePackageWriter> KlassCompositePackageWriter;
+typedef KlassToFieldEnvelope<PackageFieldSelector, PackageWriterWithClear> KlassPackageWriterWithClear;
+typedef CompositeFunctor<PkgPtr, CompositePackageWriter, ClearArtifact<PkgPtr> > CompositePackageWriterWithClear;
+typedef JfrArtifactCallbackHost<PkgPtr, CompositePackageWriterWithClear> CompositePackageCallback;
+
 static void write_packages() {
   assert(_writer != NULL, "invariant");
   PackageWriter pw(_writer, _class_unload);
   KlassPackageWriter kpw(&pw);
-  _artifacts->iterate_klasses(kpw);
-  if (previous_epoch()) {
+  if (current_epoch()) {
+    _artifacts->iterate_klasses(kpw);
+    _artifacts->tally(pw);
+    return;
+  }
+  assert(previous_epoch(), "invariant");
+  if (_leakp_writer == NULL) {
+    _artifacts->iterate_klasses(kpw);
     ClearArtifact<PkgPtr> clear;
     PackageWriterWithClear pwwc(&pw, &clear);
     PackageCallback callback(&pwwc);
     _subsystem_callback = &callback;
     do_packages();
+  } else {
+    LeakPackageWriter lpw(_leakp_writer, _class_unload);
+    CompositePackageWriter cpw(&lpw, &pw);
+    KlassCompositePackageWriter kcpw(&cpw);
+    _artifacts->iterate_klasses(kcpw);
+    ClearArtifact<PkgPtr> clear;
+    CompositePackageWriterWithClear cpwwc(&cpw, &clear);
+    CompositePackageCallback callback(&cpwwc);
+    _subsystem_callback = &callback;
+    do_packages();
   }
   _artifacts->tally(pw);
 }
 
+static int write_module(JfrCheckpointWriter* writer, ModPtr mod, bool leakp) {
+  assert(mod != NULL, "invariant");
+  assert(_artifacts != NULL, "invariant");
+  writer->write(artifact_id(mod));
+  writer->write(mark_symbol(mod->name(), leakp));
+  writer->write(mark_symbol(mod->version(), leakp));
+  writer->write(mark_symbol(mod->location(), leakp));
+  writer->write(cld_id(mod->loader_data(), leakp));
+  return 1;
+}
+
 int write__module(JfrCheckpointWriter* writer, const void* m) {
   assert(m != NULL, "invariant");
-  assert(_artifacts != NULL, "invariant");
   ModPtr mod = (ModPtr)m;
-  writer->write(artifact_id(mod));
-  writer->write(mark_symbol(mod->name()));
-  writer->write(mark_symbol(mod->version()));
-  writer->write(mark_symbol(mod->location()));
-  writer->write(cld_id(mod->loader_data()));
   set_serialized(mod);
-  return 1;
+  return write_module(writer, mod, false);
+}
+
+int write__module__leakp(JfrCheckpointWriter* writer, const void* m) {
+  assert(m != NULL, "invariant");
+  ModPtr mod = (ModPtr)m;
+  CLEAR_LEAKP(mod);
+  return write_module(writer, mod, true);
 }
 
 static void do_module(ModuleEntry* entry) {
@@ -346,24 +475,48 @@
 typedef JfrArtifactCallbackHost<ModPtr, ModuleWriterWithClear> ModuleCallback;
 typedef KlassToFieldEnvelope<ModuleFieldSelector, ModuleWriter> KlassModuleWriter;
 
+typedef LeakPredicate<ModPtr> LeakModulePredicate;
+typedef JfrPredicatedTypeWriterImplHost<ModPtr, LeakModulePredicate, write__module__leakp> LeakModuleWriterImpl;
+typedef JfrTypeWriterHost<LeakModuleWriterImpl, TYPE_MODULE> LeakModuleWriter;
+
+typedef CompositeFunctor<ModPtr, LeakModuleWriter, ModuleWriter> CompositeModuleWriter;
+typedef KlassToFieldEnvelope<ModuleFieldSelector, CompositeModuleWriter> KlassCompositeModuleWriter;
+typedef CompositeFunctor<ModPtr, CompositeModuleWriter, ClearArtifact<ModPtr> > CompositeModuleWriterWithClear;
+typedef JfrArtifactCallbackHost<ModPtr, CompositeModuleWriterWithClear> CompositeModuleCallback;
+
 static void write_modules() {
   assert(_writer != NULL, "invariant");
   ModuleWriter mw(_writer, _class_unload);
   KlassModuleWriter kmw(&mw);
-  _artifacts->iterate_klasses(kmw);
-  if (previous_epoch()) {
+  if (current_epoch()) {
+    _artifacts->iterate_klasses(kmw);
+    _artifacts->tally(mw);
+    return;
+  }
+  assert(previous_epoch(), "invariant");
+  if (_leakp_writer == NULL) {
+    _artifacts->iterate_klasses(kmw);
     ClearArtifact<ModPtr> clear;
     ModuleWriterWithClear mwwc(&mw, &clear);
     ModuleCallback callback(&mwwc);
     _subsystem_callback = &callback;
     do_modules();
+  } else {
+    LeakModuleWriter lmw(_leakp_writer, _class_unload);
+    CompositeModuleWriter cmw(&lmw, &mw);
+    KlassCompositeModuleWriter kcpw(&cmw);
+    _artifacts->iterate_klasses(kcpw);
+    ClearArtifact<ModPtr> clear;
+    CompositeModuleWriterWithClear cmwwc(&cmw, &clear);
+    CompositeModuleCallback callback(&cmwwc);
+    _subsystem_callback = &callback;
+    do_modules();
   }
   _artifacts->tally(mw);
 }
 
-int write__classloader(JfrCheckpointWriter* writer, const void* c) {
-  assert(c != NULL, "invariant");
-  CldPtr cld = (CldPtr)c;
+static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp) {
+  assert(cld != NULL, "invariant");
   assert(!cld->is_unsafe_anonymous(), "invariant");
   // class loader type
   const Klass* class_loader_klass = cld->class_loader_klass();
@@ -375,10 +528,23 @@
   } else {
     writer->write(artifact_id(cld)); // class loader instance id
     writer->write(artifact_id(class_loader_klass)); // class loader type id
-    writer->write(mark_symbol(cld->name())); // class loader instance name
+    writer->write(mark_symbol(cld->name(), leakp)); // class loader instance name
   }
+  return 1;
+}
+
+int write__classloader(JfrCheckpointWriter* writer, const void* c) {
+  assert(c != NULL, "invariant");
+  CldPtr cld = (CldPtr)c;
   set_serialized(cld);
-  return 1;
+  return write_classloader(writer, cld, false);
+}
+
+int write__classloader__leakp(JfrCheckpointWriter* writer, const void* c) {
+  assert(c != NULL, "invariant");
+  CldPtr cld = (CldPtr)c;
+  CLEAR_LEAKP(cld);
+  return write_classloader(writer, cld, true);
 }
 
 static void do_class_loader_data(ClassLoaderData* cld) {
@@ -419,17 +585,42 @@
 typedef JfrArtifactCallbackHost<CldPtr, CldWriterWithClear> CldCallback;
 typedef KlassToFieldEnvelope<CldFieldSelector, CldWriter> KlassCldWriter;
 
+typedef LeakPredicate<CldPtr> LeakCldPredicate;
+typedef JfrPredicatedTypeWriterImplHost<CldPtr, LeakCldPredicate, write__classloader__leakp> LeakCldWriterImpl;
+typedef JfrTypeWriterHost<LeakCldWriterImpl, TYPE_CLASSLOADER> LeakCldWriter;
+
+typedef CompositeFunctor<CldPtr, LeakCldWriter, CldWriter> CompositeCldWriter;
+typedef KlassToFieldEnvelope<CldFieldSelector, CompositeCldWriter> KlassCompositeCldWriter;
+typedef CompositeFunctor<CldPtr, CompositeCldWriter, ClearArtifact<CldPtr> > CompositeCldWriterWithClear;
+typedef JfrArtifactCallbackHost<CldPtr, CompositeCldWriterWithClear> CompositeCldCallback;
+
 static void write_classloaders() {
   assert(_writer != NULL, "invariant");
   CldWriter cldw(_writer, _class_unload);
   KlassCldWriter kcw(&cldw);
-  _artifacts->iterate_klasses(kcw);
-  if (previous_epoch()) {
+  if (current_epoch()) {
+    _artifacts->iterate_klasses(kcw);
+    _artifacts->tally(cldw);
+    return;
+  }
+  assert(previous_epoch(), "invariant");
+  if (_leakp_writer == NULL) {
+    _artifacts->iterate_klasses(kcw);
     ClearArtifact<CldPtr> clear;
     CldWriterWithClear cldwwc(&cldw, &clear);
     CldCallback callback(&cldwwc);
     _subsystem_callback = &callback;
     do_class_loaders();
+  } else {
+    LeakCldWriter lcldw(_leakp_writer, _class_unload);
+    CompositeCldWriter ccldw(&lcldw, &cldw);
+    KlassCompositeCldWriter kccldw(&ccldw);
+    _artifacts->iterate_klasses(kccldw);
+    ClearArtifact<CldPtr> clear;
+    CompositeCldWriterWithClear ccldwwc(&ccldw, &clear);
+    CompositeCldCallback callback(&ccldwwc);
+    _subsystem_callback = &callback;
+    do_class_loaders();
   }
   _artifacts->tally(cldw);
 }
@@ -446,31 +637,41 @@
   assert(IS_METHOD_SERIALIZED(method), "invariant");
 }
 
-int write__method(JfrCheckpointWriter* writer, const void* m) {
+static int write_method(JfrCheckpointWriter* writer, MethodPtr method, bool leakp) {
   assert(writer != NULL, "invariant");
+  assert(method != NULL, "invariant");
   assert(_artifacts != NULL, "invariant");
-  assert(m != NULL, "invariant");
-  MethodPtr method = (MethodPtr)m;
   KlassPtr klass = method->method_holder();
   assert(klass != NULL, "invariant");
-  assert(METHOD_USED_ANY_EPOCH(klass), "invariant");
   writer->write(method_id(klass, method));
   writer->write(artifact_id(klass));
-  writer->write(mark_symbol(method->name()));
-  writer->write(mark_symbol(method->signature()));
+  writer->write(mark_symbol(method->name(), leakp));
+  writer->write(mark_symbol(method->signature(), leakp));
   writer->write((u2)get_flags(method));
   writer->write(get_visibility(method));
-  set_serialized(method);
   return 1;
 }
 
-template <typename MethodCallback, typename KlassCallback>
+int write__method(JfrCheckpointWriter* writer, const void* m) {
+  assert(m != NULL, "invariant");
+  MethodPtr method = (MethodPtr)m;
+  set_serialized(method);
+  return write_method(writer, method, false);
+}
+
+int write__method__leakp(JfrCheckpointWriter* writer, const void* m) {
+  assert(m != NULL, "invariant");
+  MethodPtr method = (MethodPtr)m;
+  return write_method(writer, method, true);
+}
+
+template <typename MethodCallback, typename KlassCallback, bool leakp>
 class MethodIteratorHost {
  private:
   MethodCallback _method_cb;
   KlassCallback _klass_cb;
-  MethodUsedPredicate _method_used_predicate;
-  MethodFlagPredicate _method_flag_predicate;
+  MethodUsedPredicate<leakp> _method_used_predicate;
+  MethodFlagPredicate<leakp> _method_flag_predicate;
  public:
   MethodIteratorHost(JfrCheckpointWriter* writer,
                      bool current_epoch = false,
@@ -483,7 +684,6 @@
 
   bool operator()(KlassPtr klass) {
     if (_method_used_predicate(klass)) {
-      assert(METHOD_AND_CLASS_USED_ANY_EPOCH(klass), "invariant");
       const InstanceKlass* const ik = InstanceKlass::cast(klass);
       const int len = ik->methods()->length();
       for (int i = 0; i < len; ++i) {
@@ -512,14 +712,26 @@
 
 typedef SerializePredicate<MethodPtr> MethodPredicate;
 typedef JfrPredicatedTypeWriterImplHost<MethodPtr, MethodPredicate, write__method> MethodWriterImplTarget;
+typedef Wrapper<KlassPtr, Stub> KlassCallbackStub;
 typedef JfrTypeWriterHost<MethodWriterImplTarget, TYPE_METHOD> MethodWriterImpl;
-typedef Wrapper<KlassPtr, Stub> KlassCallbackStub;
-typedef MethodIteratorHost<MethodWriterImpl, KlassCallbackStub> MethodWriter;
+typedef MethodIteratorHost<MethodWriterImpl, KlassCallbackStub, false> MethodWriter;
+
+typedef LeakPredicate<MethodPtr> LeakMethodPredicate;
+typedef JfrPredicatedTypeWriterImplHost<MethodPtr, LeakMethodPredicate, write__method__leakp> LeakMethodWriterImplTarget;
+typedef JfrTypeWriterHost<LeakMethodWriterImplTarget, TYPE_METHOD> LeakMethodWriterImpl;
+typedef MethodIteratorHost<LeakMethodWriterImpl, KlassCallbackStub, true> LeakMethodWriter;
+typedef CompositeFunctor<KlassPtr, LeakMethodWriter, MethodWriter> CompositeMethodWriter;
 
 static void write_methods() {
   assert(_writer != NULL, "invariant");
   MethodWriter mw(_writer, current_epoch(), _class_unload);
-  _artifacts->iterate_klasses(mw);
+  if (_leakp_writer == NULL) {
+    _artifacts->iterate_klasses(mw);
+  } else {
+    LeakMethodWriter lpmw(_leakp_writer, current_epoch(), _class_unload);
+    CompositeMethodWriter cmw(&lpmw, &mw);
+    _artifacts->iterate_klasses(cmw);
+  }
   _artifacts->tally(mw);
 }
 
@@ -537,47 +749,97 @@
   assert(ptr->is_serialized(), "invariant");
 }
 
-int write__symbol(JfrCheckpointWriter* writer, const void* e) {
+static int write_symbol(JfrCheckpointWriter* writer, SymbolEntryPtr entry, bool leakp) {
   assert(writer != NULL, "invariant");
-  assert(e != NULL, "invariant");
+  assert(entry != NULL, "invariant");
   ResourceMark rm;
-  SymbolEntryPtr entry = (SymbolEntryPtr)e;
   writer->write(create_symbol_id(entry->id()));
   writer->write(entry->value()->as_C_string());
+  return 1;
+}
+
+int write__symbol(JfrCheckpointWriter* writer, const void* e) {
+  assert(e != NULL, "invariant");
+  SymbolEntryPtr entry = (SymbolEntryPtr)e;
   set_serialized(entry);
+  return write_symbol(writer, entry, false);
+}
+
+int write__symbol__leakp(JfrCheckpointWriter* writer, const void* e) {
+  assert(e != NULL, "invariant");
+  SymbolEntryPtr entry = (SymbolEntryPtr)e;
+  return write_symbol(writer, entry, true);
+}
+
+static int write_cstring(JfrCheckpointWriter* writer, CStringEntryPtr entry, bool leakp) {
+  assert(writer != NULL, "invariant");
+  assert(entry != NULL, "invariant");
+  writer->write(create_symbol_id(entry->id()));
+  writer->write(entry->value());
   return 1;
 }
 
 int write__cstring(JfrCheckpointWriter* writer, const void* e) {
-  assert(writer != NULL, "invariant");
   assert(e != NULL, "invariant");
   CStringEntryPtr entry = (CStringEntryPtr)e;
-  writer->write(create_symbol_id(entry->id()));
-  writer->write(entry->value());
   set_serialized(entry);
-  return 1;
+  return write_cstring(writer, entry, false);
 }
 
-typedef SymbolPredicate<SymbolEntryPtr> SymPredicate;
+int write__cstring__leakp(JfrCheckpointWriter* writer, const void* e) {
+  assert(e != NULL, "invariant");
+  CStringEntryPtr entry = (CStringEntryPtr)e;
+  return write_cstring(writer, entry, true);
+}
+
+typedef SymbolPredicate<SymbolEntryPtr, false> SymPredicate;
 typedef JfrPredicatedTypeWriterImplHost<SymbolEntryPtr, SymPredicate, write__symbol> SymbolEntryWriterImpl;
 typedef JfrTypeWriterHost<SymbolEntryWriterImpl, TYPE_SYMBOL> SymbolEntryWriter;
-typedef SymbolPredicate<CStringEntryPtr> CStringPredicate;
+typedef SymbolPredicate<CStringEntryPtr, false> CStringPredicate;
 typedef JfrPredicatedTypeWriterImplHost<CStringEntryPtr, CStringPredicate, write__cstring> CStringEntryWriterImpl;
 typedef JfrTypeWriterHost<CStringEntryWriterImpl, TYPE_SYMBOL> CStringEntryWriter;
 
+typedef SymbolPredicate<SymbolEntryPtr, true> LeakSymPredicate;
+typedef JfrPredicatedTypeWriterImplHost<SymbolEntryPtr, LeakSymPredicate, write__symbol__leakp> LeakSymbolEntryWriterImpl;
+typedef JfrTypeWriterHost<LeakSymbolEntryWriterImpl, TYPE_SYMBOL> LeakSymbolEntryWriter;
+typedef CompositeFunctor<SymbolEntryPtr, LeakSymbolEntryWriter, SymbolEntryWriter> CompositeSymbolWriter;
+typedef SymbolPredicate<CStringEntryPtr, true> LeakCStringPredicate;
+typedef JfrPredicatedTypeWriterImplHost<CStringEntryPtr, LeakCStringPredicate, write__cstring__leakp> LeakCStringEntryWriterImpl;
+typedef JfrTypeWriterHost<LeakCStringEntryWriterImpl, TYPE_SYMBOL> LeakCStringEntryWriter;
+typedef CompositeFunctor<CStringEntryPtr, LeakCStringEntryWriter, CStringEntryWriter> CompositeCStringWriter;
+
+static void write_symbols_with_leakp() {
+  assert(_leakp_writer != NULL, "invariant");
+  SymbolEntryWriter sw(_writer, _class_unload);
+  LeakSymbolEntryWriter lsw(_leakp_writer, _class_unload);
+  CompositeSymbolWriter csw(&lsw, &sw);
+  _artifacts->iterate_symbols(csw);
+  CStringEntryWriter ccsw(_writer, _class_unload, true); // skip header
+  LeakCStringEntryWriter lccsw(_leakp_writer, _class_unload, true); // skip header
+  CompositeCStringWriter cccsw(&lccsw, &ccsw);
+  _artifacts->iterate_cstrings(cccsw);
+  sw.add(ccsw.count());
+  lsw.add(lccsw.count());
+  _artifacts->tally(sw);
+}
+
 static void write_symbols() {
   assert(_writer != NULL, "invariant");
-  SymbolEntryWriter symbol_writer(_writer, _class_unload);
-  _artifacts->iterate_symbols(symbol_writer);
-  CStringEntryWriter cstring_writer(_writer, _class_unload, true); // skip header
-  _artifacts->iterate_cstrings(cstring_writer);
-  symbol_writer.add(cstring_writer.count());
-  _artifacts->tally(symbol_writer);
+  if (_leakp_writer != NULL) {
+    write_symbols_with_leakp();
+    return;
+  }
+  SymbolEntryWriter sw(_writer, _class_unload);
+  _artifacts->iterate_symbols(sw);
+  CStringEntryWriter csw(_writer, _class_unload, true); // skip header
+  _artifacts->iterate_cstrings(csw);
+  sw.add(csw.count());
+  _artifacts->tally(sw);
 }
 
 typedef Wrapper<KlassPtr, ClearArtifact> ClearKlassBits;
 typedef Wrapper<MethodPtr, ClearArtifact> ClearMethodFlag;
-typedef MethodIteratorHost<ClearMethodFlag, ClearKlassBits> ClearKlassAndMethods;
+typedef MethodIteratorHost<ClearMethodFlag, ClearKlassBits, false> ClearKlassAndMethods;
 
 static size_t teardown() {
   assert(_artifacts != NULL, "invariant");
@@ -592,8 +854,9 @@
   return total_count;
 }
 
-static void setup(JfrCheckpointWriter* writer, bool class_unload, bool flushpoint) {
+static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload, bool flushpoint) {
   _writer = writer;
+  _leakp_writer = leakp_writer;
   _class_unload = class_unload;
   _flushpoint = flushpoint;
   if (_artifacts == NULL) {
@@ -608,10 +871,10 @@
 /**
  * Write all "tagged" (in-use) constant artifacts and their dependencies.
  */
-size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, bool class_unload, bool flushpoint) {
+size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload, bool flushpoint) {
   assert(writer != NULL, "invariant");
   ResourceMark rm;
-  setup(writer, class_unload, flushpoint);
+  setup(writer, leakp_writer, class_unload, flushpoint);
   // write order is important because an individual write step
   // might tag an artifact to be written in a subsequent step
   if (!write_klasses()) {
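
The write_klasses/write_packages/write_modules/write_classloaders paths above all chain two per-artifact operations (a leakp-subset writer and the regular writer) behind a single callback via CompositeFunctor and JfrArtifactCallbackHost. The following is a minimal, self-contained sketch of that composition idea only; Composite, TagOp and WriteOp are illustrative names, not the HotSpot templates.

#include <cstdio>

// Applies the left operation, then the right one, behind a single callable;
// similar in spirit to CompositeFunctor, but not the HotSpot definition.
template <typename T, typename Left, typename Right>
class Composite {
  Left* _left;
  Right* _right;
 public:
  Composite(Left* left, Right* right) : _left(left), _right(right) {}
  bool operator()(T const& value) {
    const bool l = (*_left)(value);
    const bool r = (*_right)(value);
    return l && r;
  }
};

struct TagOp   { bool operator()(int const& v) { std::printf("tag %d\n", v);   return true; } };
struct WriteOp { bool operator()(int const& v) { std::printf("write %d\n", v); return true; } };

int main() {
  TagOp tag;
  WriteOp write;
  Composite<int, TagOp, WriteOp> both(&tag, &write);
  for (int i = 0; i < 3; ++i) {
    both(i); // one callback drives both per-artifact operations
  }
  return 0;
}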
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -31,7 +31,7 @@
 
 class JfrTypeSet : AllStatic {
  public:
-  static size_t serialize(JfrCheckpointWriter* writer, bool class_unload, bool flushpoint);
+  static size_t serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload, bool flushpoint);
 };
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESET_HPP
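
JfrTypeSet::serialize now accepts an optional second checkpoint writer for the leak profiler. Below is a self-contained sketch of that dual-writer shape, under the assumption that the secondary writer receives only artifacts flagged as leak candidates; Writer and serialize_type_set are made-up names, not JFR API.

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

struct Writer {
  std::vector<int> ids;
  void write(int id) { ids.push_back(id); }
};

// Every artifact goes to 'primary'; artifacts flagged as leak candidates are
// additionally written to 'leakp' when a secondary writer is supplied.
static void serialize_type_set(Writer& primary, Writer* leakp,
                               const std::vector<std::pair<int, bool> >& artifacts) {
  for (size_t i = 0; i < artifacts.size(); ++i) {
    primary.write(artifacts[i].first);
    if (leakp != NULL && artifacts[i].second) {
      leakp->write(artifacts[i].first);
    }
  }
}

int main() {
  std::vector<std::pair<int, bool> > artifacts;
  artifacts.push_back(std::make_pair(1, false));
  artifacts.push_back(std::make_pair(2, true));      // leak candidate
  Writer primary;
  Writer leakp;
  serialize_type_set(primary, &leakp, artifacts);    // leak profiler running
  Writer primary_only;
  serialize_type_set(primary_only, NULL, artifacts); // leak profiler off
  std::printf("primary=%u leakp=%u primary_only=%u\n",
              (unsigned)primary.ids.size(),
              (unsigned)leakp.ids.size(),
              (unsigned)primary_only.ids.size());
  return 0;
}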
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -67,38 +67,7 @@
   _class_unload = class_unload;
 }
 
-traceid JfrSymbolId::mark_unsafe_anonymous_klass_name(const Klass* k) {
-  assert(k != NULL, "invariant");
-  assert(k->is_instance_klass(), "invariant");
-  assert(is_unsafe_anonymous_klass(k), "invariant");
-
-  uintptr_t anonymous_symbol_hash_code = 0;
-  const char* const anonymous_symbol =
-    create_unsafe_anonymous_klass_symbol((const InstanceKlass*)k, anonymous_symbol_hash_code);
-
-  if (anonymous_symbol == NULL) {
-    return 0;
-  }
-
-  assert(anonymous_symbol_hash_code != 0, "invariant");
-  traceid symbol_id = mark(anonymous_symbol, anonymous_symbol_hash_code);
-  assert(mark(anonymous_symbol, anonymous_symbol_hash_code) == symbol_id, "invariant");
-  return symbol_id;
-}
-
-const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(const Symbol* symbol) const {
-  return _sym_table->lookup_only(symbol, (uintptr_t)symbol->identity_hash());
-}
-
-const JfrSymbolId::SymbolEntry* JfrSymbolId::map_symbol(uintptr_t hash) const {
-  return _sym_table->lookup_only(NULL, hash);
-}
-
-const JfrSymbolId::CStringEntry* JfrSymbolId::map_cstring(uintptr_t hash) const {
-  return _cstring_table->lookup_only(NULL, hash);
-}
-
-void JfrSymbolId::assign_id(const SymbolEntry* entry) {
+void JfrSymbolId::link(const SymbolEntry* entry) {
   assert(entry != NULL, "invariant");
   const_cast<Symbol*>(entry->literal())->increment_refcount();
   assert(entry->id() == 0, "invariant");
@@ -107,7 +76,7 @@
   _sym_list = entry;
 }
 
-bool JfrSymbolId::equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry) {
+bool JfrSymbolId::equals(uintptr_t hash, const SymbolEntry* entry) {
   // query might be NULL
   assert(entry != NULL, "invariant");
   assert(entry->hash() == hash, "invariant");
@@ -119,16 +88,26 @@
   const_cast<Symbol*>(entry->literal())->decrement_refcount();
 }
 
-void JfrSymbolId::assign_id(const CStringEntry* entry) {
+static const char* resource_str_to_c_heap_str(const char* resource_str) {
+  assert(resource_str != NULL, "invariant");
+  const size_t length = strlen(resource_str);
+  char* const c_string = JfrCHeapObj::new_array<char>(length + 1);
+  assert(c_string != NULL, "invariant");
+  strncpy(c_string, resource_str, length + 1);
+  return c_string;
+}
+
+void JfrSymbolId::link(const CStringEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
   entry->set_id(++_symbol_id_counter);
   entry->set_list_next(_cstring_list);
+  const char* const resource_str = entry->literal();
+  const_cast<CStringEntry*>(entry)->set_literal(resource_str_to_c_heap_str(resource_str));
   _cstring_list = entry;
 }
 
-bool JfrSymbolId::equals(const char* query, uintptr_t hash, const CStringEntry* entry) {
-  // query might be NULL
+bool JfrSymbolId::equals(uintptr_t hash, const CStringEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->hash() == hash, "invariant");
   return true;
@@ -137,53 +116,57 @@
 void JfrSymbolId::unlink(const CStringEntry* entry) {
   assert(entry != NULL, "invariant");
   if (entry->id() != 1) {
-    FREE_C_HEAP_ARRAY(char, entry->literal());
+    JfrCHeapObj::free(const_cast<char*>(entry->literal()), 0);
   }
 }
 
-traceid JfrSymbolId::mark(const Klass* k) {
-  assert(k != NULL, "invariant");
-  traceid symbol_id = 0;
-  if (is_unsafe_anonymous_klass(k)) {
-    symbol_id = mark_unsafe_anonymous_klass_name(k);
-  }
-  if (0 == symbol_id) {
-    Symbol* const sym = k->name();
-    if (sym != NULL) {
-      symbol_id = mark(sym);
-    }
-  }
-  assert(symbol_id > 0, "a symbol handler must mark the symbol for writing");
-  return symbol_id;
+traceid JfrSymbolId::mark(const Symbol* symbol, bool leakp) {
+  assert(symbol != NULL, "invariant");
+  return mark((uintptr_t)symbol->identity_hash(), symbol, leakp);
 }
 
-traceid JfrSymbolId::mark(const Symbol* symbol) {
-  assert(symbol != NULL, "invariant");
-  return mark(symbol, (uintptr_t)symbol->identity_hash());
-}
+static unsigned int last_symbol_hash = 0;
+static traceid last_symbol_id = 0;
 
-traceid JfrSymbolId::mark(const Symbol* data, uintptr_t hash) {
+traceid JfrSymbolId::mark(uintptr_t hash, const Symbol* data, bool leakp) {
   assert(data != NULL, "invariant");
   assert(_sym_table != NULL, "invariant");
-  const SymbolEntry& entry = _sym_table->lookup_put(data, hash);
+  if (hash == last_symbol_hash) {
+    assert(last_symbol_id != 0, "invariant");
+    return last_symbol_id;
+  }
+  const SymbolEntry& entry = _sym_table->lookup_put(hash, data);
   if (_class_unload) {
     entry.set_unloading();
   }
-  return entry.id();
+  if (leakp) {
+    entry.set_leakp();
+  }
+  last_symbol_hash = hash;
+  last_symbol_id = entry.id();
+  return last_symbol_id;
 }
 
-traceid JfrSymbolId::mark(const char* str, uintptr_t hash) {
+static unsigned int last_cstring_hash = 0;
+static traceid last_cstring_id = 0;
+
+traceid JfrSymbolId::mark(uintptr_t hash, const char* str, bool leakp) {
   assert(str != NULL, "invariant");
-  const CStringEntry& entry = _cstring_table->lookup_put(str, hash);
+  assert(_cstring_table != NULL, "invariant");
+  if (hash == last_cstring_hash) {
+    assert(last_cstring_id != 0, "invariant");
+    return last_cstring_id;
+  }
+  const CStringEntry& entry = _cstring_table->lookup_put(hash, str);
   if (_class_unload) {
     entry.set_unloading();
   }
-  return entry.id();
-}
-
-bool JfrSymbolId::is_unsafe_anonymous_klass(const Klass* k) {
-  assert(k != NULL, "invariant");
-  return k->is_instance_klass() && ((const InstanceKlass*)k)->is_unsafe_anonymous();
+  if (leakp) {
+    entry.set_leakp();
+  }
+  last_cstring_hash = hash;
+  last_cstring_id = entry.id();
+  return last_cstring_id;
 }
 
 /*
@@ -194,7 +177,7 @@
 * caller needs ResourceMark
 */
 
-uintptr_t JfrSymbolId::unsafe_anonymous_klass_name_hash_code(const InstanceKlass* ik) {
+uintptr_t JfrSymbolId::unsafe_anonymous_klass_name_hash(const InstanceKlass* ik) {
   assert(ik != NULL, "invariant");
   assert(ik->is_unsafe_anonymous(), "invariant");
   const oop mirror = ik->java_mirror_no_keepalive();
@@ -202,19 +185,18 @@
   return (uintptr_t)mirror->identity_hash();
 }
 
-const char* JfrSymbolId::create_unsafe_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t& hashcode) {
+static const char* create_unsafe_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t hash) {
   assert(ik != NULL, "invariant");
   assert(ik->is_unsafe_anonymous(), "invariant");
-  assert(0 == hashcode, "invariant");
+  assert(hash != 0, "invariant");
   char* anonymous_symbol = NULL;
   const oop mirror = ik->java_mirror_no_keepalive();
   assert(mirror != NULL, "invariant");
   char hash_buf[40];
-  hashcode = unsafe_anonymous_klass_name_hash_code(ik);
-  sprintf(hash_buf, "/" UINTX_FORMAT, hashcode);
+  sprintf(hash_buf, "/" UINTX_FORMAT, hash);
   const size_t hash_len = strlen(hash_buf);
   const size_t result_len = ik->name()->utf8_length();
-  anonymous_symbol = NEW_C_HEAP_ARRAY(char, result_len + hash_len + 1, mtTracing);
+  anonymous_symbol = NEW_RESOURCE_ARRAY(char, result_len + hash_len + 1);
   assert(anonymous_symbol != NULL, "invariant");
   ik->name()->as_klass_external_name(anonymous_symbol, (int)result_len + 1);
   assert(strlen(anonymous_symbol) == result_len, "invariant");
@@ -223,17 +205,54 @@
   return anonymous_symbol;
 }
 
-uintptr_t JfrSymbolId::regular_klass_name_hash_code(const Klass* k) {
+bool JfrSymbolId::is_unsafe_anonymous_klass(const Klass* k) {
   assert(k != NULL, "invariant");
-  const Symbol* const sym = k->name();
-  assert(sym != NULL, "invariant");
-  return (uintptr_t)const_cast<Symbol*>(sym)->identity_hash();
+  return k->is_instance_klass() && ((const InstanceKlass*)k)->is_unsafe_anonymous();
+}
+
+static unsigned int last_anonymous_hash = 0;
+static traceid last_anonymous_id = 0;
+
+traceid JfrSymbolId::mark_unsafe_anonymous_klass_name(const InstanceKlass* ik, bool leakp) {
+  assert(ik != NULL, "invariant");
+  assert(ik->is_unsafe_anonymous(), "invariant");
+  const uintptr_t hash = unsafe_anonymous_klass_name_hash(ik);
+  if (hash == last_anonymous_hash) {
+    assert(last_anonymous_id != 0, "invariant");
+    return last_anonymous_id;
+  }
+  last_anonymous_hash = hash;
+  last_anonymous_id = mark(hash, create_unsafe_anonymous_klass_symbol(ik, hash), leakp);
+  return last_anonymous_id;
+}
+
+traceid JfrSymbolId::mark(const Klass* k, bool leakp) {
+  assert(k != NULL, "invariant");
+  traceid symbol_id = 0;
+  if (is_unsafe_anonymous_klass(k)) {
+    assert(k->is_instance_klass(), "invariant");
+    symbol_id = mark_unsafe_anonymous_klass_name((const InstanceKlass*)k, leakp);
+  }
+  if (0 == symbol_id) {
+    Symbol* const sym = k->name();
+    if (sym != NULL) {
+      symbol_id = mark(sym, leakp);
+    }
+  }
+  assert(symbol_id > 0, "a symbol handler must mark the symbol for writing");
+  return symbol_id;
 }
 
 static void preload_bootstrap_loader_name(JfrSymbolId* symbol_id) {
   assert(symbol_id != NULL, "invariant");
   assert(!symbol_id->has_entries(), "invariant");
-  symbol_id->mark((const char*)&BOOTSTRAP_LOADER_NAME, 0); // pre-load "bootstrap" into id 1
+  symbol_id->mark(1, (const char*)&BOOTSTRAP_LOADER_NAME, false); // pre-load "bootstrap" into id 1
+}
+
+static void reset_symbol_caches() {
+  last_anonymous_hash = 0;
+  last_symbol_hash = 0;
+  last_cstring_hash = 0;
 }
 
 JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_id(new JfrSymbolId()),
@@ -260,41 +279,31 @@
 }
 
 void JfrArtifactSet::clear() {
+  reset_symbol_caches();
   _symbol_id->clear();
   preload_bootstrap_loader_name(_symbol_id);
   // _klass_list will be cleared by a ResourceMark
 }
 
-traceid JfrArtifactSet::mark_unsafe_anonymous_klass_name(const Klass* klass) {
-  return _symbol_id->mark_unsafe_anonymous_klass_name(klass);
+traceid JfrArtifactSet::mark_unsafe_anonymous_klass_name(const Klass* klass, bool leakp) {
+  assert(klass->is_instance_klass(), "invariant");
+  return _symbol_id->mark_unsafe_anonymous_klass_name((const InstanceKlass*)klass, leakp);
 }
 
-traceid JfrArtifactSet::mark(const Symbol* sym, uintptr_t hash) {
-  return _symbol_id->mark(sym, hash);
-}
-
-traceid JfrArtifactSet::mark(const Klass* klass) {
-  return _symbol_id->mark(klass);
+traceid JfrArtifactSet::mark(uintptr_t hash, const Symbol* sym, bool leakp) {
+  return _symbol_id->mark(hash, sym, leakp);
 }
 
-traceid JfrArtifactSet::mark(const Symbol* symbol) {
-  return _symbol_id->mark(symbol);
-}
-
-traceid JfrArtifactSet::mark(const char* const str, uintptr_t hash) {
-  return _symbol_id->mark(str, hash);
+traceid JfrArtifactSet::mark(const Klass* klass, bool leakp) {
+  return _symbol_id->mark(klass, leakp);
 }
 
-const JfrSymbolId::SymbolEntry* JfrArtifactSet::map_symbol(const Symbol* symbol) const {
-  return _symbol_id->map_symbol(symbol);
+traceid JfrArtifactSet::mark(const Symbol* symbol, bool leakp) {
+  return _symbol_id->mark(symbol, leakp);
 }
 
-const JfrSymbolId::SymbolEntry* JfrArtifactSet::map_symbol(uintptr_t hash) const {
-  return _symbol_id->map_symbol(hash);
-}
-
-const JfrSymbolId::CStringEntry* JfrArtifactSet::map_cstring(uintptr_t hash) const {
-  return _symbol_id->map_cstring(hash);
+traceid JfrArtifactSet::mark(uintptr_t hash, const char* const str, bool leakp) {
+  return _symbol_id->mark(hash, str, leakp);
 }
 
 bool JfrArtifactSet::has_klass_entries() const {
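
The reworked JfrSymbolId::mark overloads above keep one-entry caches (last_symbol_hash/last_symbol_id and the cstring/anonymous counterparts) so consecutive marks of the same hash bypass the table lookup, and reset_symbol_caches() clears them together with the tables. A standalone sketch of that memoization discipline, using std::map as a stand-in for the JFR hashtable:

#include <cstdint>
#include <cstdio>
#include <map>

typedef uint64_t traceid;

class SymbolIds {
  std::map<uintptr_t, traceid> _table;  // stand-in for the JFR hashtable
  traceid _counter;
  uintptr_t _last_hash;                 // one-entry cache
  traceid _last_id;
 public:
  SymbolIds() : _counter(0), _last_hash(0), _last_id(0) {}
  traceid mark(uintptr_t hash) {
    if (hash == _last_hash && _last_id != 0) {
      return _last_id;                  // fast path: no table lookup
    }
    traceid& id = _table[hash];
    if (id == 0) {
      id = ++_counter;                  // first occurrence: assign an id
    }
    _last_hash = hash;
    _last_id = id;
    return id;
  }
  void clear() {                        // analogous to reset_symbol_caches()
    _table.clear();
    _counter = 0;
    _last_hash = 0;
    _last_id = 0;
  }
};

int main() {
  SymbolIds ids;
  const traceid first  = ids.mark(42);
  const traceid second = ids.mark(42); // served from the one-entry cache
  const traceid other  = ids.mark(7);  // different hash: new id
  std::printf("%llu %llu %llu\n", (unsigned long long)first,
              (unsigned long long)second, (unsigned long long)other);
  return 0;
}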
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -126,40 +126,119 @@
   }
 };
 
-template <typename T>
+template <typename T, bool leakp>
 class SymbolPredicate {
   bool _class_unload;
  public:
   SymbolPredicate(bool class_unload) : _class_unload(class_unload) {}
   bool operator()(T const& value) {
     assert(value != NULL, "invariant");
-    return _class_unload ? value->is_unloading() : !value->is_serialized();
+    if (_class_unload) {
+      return leakp ? value->is_leakp() : value->is_unloading();
+    }
+    return leakp ? value->is_leakp() : !value->is_serialized();
   }
 };
 
+template <bool leakp>
 class MethodUsedPredicate {
   bool _current_epoch;
 public:
   MethodUsedPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
   bool operator()(const Klass* klass) {
-    return _current_epoch ? METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_PREV_EPOCH(klass);
+    if (_current_epoch) {
+      return leakp ? IS_LEAKP(klass) : METHOD_USED_THIS_EPOCH(klass);
+    }
+    return leakp ? IS_LEAKP(klass) : METHOD_USED_PREV_EPOCH(klass);
   }
 };
 
+template <bool leakp>
 class MethodFlagPredicate {
   bool _current_epoch;
  public:
   MethodFlagPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
   bool operator()(const Method* method) {
-    return _current_epoch ? METHOD_FLAG_USED_THIS_EPOCH(method) : METHOD_FLAG_USED_PREV_EPOCH(method);
+    if (_current_epoch) {
+      return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_THIS_EPOCH(method);
+    }
+    return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_PREV_EPOCH(method);
+  }
+};
+
+template <typename T>
+class LeakPredicate {
+ public:
+  LeakPredicate(bool class_unload) {}
+  bool operator()(T const& value) {
+    return IS_LEAKP(value);
+  }
+};
+
+template <>
+class LeakPredicate<const Method*> {
+ public:
+  LeakPredicate(bool class_unload) {}
+  bool operator()(const Method* method) {
+    assert(method != NULL, "invariant");
+    return IS_METHOD_LEAKP_USED(method);
   }
 };
 
+template <typename T, int compare(const T&, const T&)>
+class UniquePredicate {
+ private:
+  GrowableArray<T> _seen;
+ public:
+  UniquePredicate(bool) : _seen() {}
+  bool operator()(T const& value) {
+    bool not_unique;
+    _seen.template find_sorted<T, compare>(value, not_unique);
+    if (not_unique) {
+      return false;
+    }
+    _seen.template insert_sorted<compare>(value);
+    return true;
+  }
+};
+
+template <typename T, int compare(const T&, const T&)>
+class CompositeLeakPredicate {
+  LeakPredicate<T> _leak_predicate;
+  UniquePredicate<T, compare> _unique;
+ public:
+  CompositeLeakPredicate(bool class_unload) : _leak_predicate(class_unload), _unique(class_unload) {}
+  bool operator()(T const& value) {
+    return _leak_predicate(value) && _unique(value);
+  }
+};
+
+template <typename T, typename IdType>
+class ListEntry : public JfrHashtableEntry<T, IdType> {
+ public:
+  ListEntry(uintptr_t hash, const T& data) : JfrHashtableEntry<T, IdType>(hash, data),
+    _list_next(NULL), _serialized(false), _unloading(false), _leakp(false) {}
+  const ListEntry<T, IdType>* list_next() const { return _list_next; }
+  void set_list_next(const ListEntry<T, IdType>* next) const { _list_next = next; }
+  bool is_serialized() const { return _serialized; }
+  void set_serialized() const { _serialized = true; }
+  bool is_unloading() const { return _unloading; }
+  void set_unloading() const { _unloading = true; }
+  bool is_leakp() const { return _leakp; }
+  void set_leakp() const { _leakp = true; }
+ private:
+  mutable const ListEntry<T, IdType>* _list_next;
+  mutable bool _serialized;
+  mutable bool _unloading;
+  mutable bool _leakp;
+};
+
 class JfrSymbolId : public JfrCHeapObj {
   template <typename, typename, template<typename, typename> class, typename, size_t>
   friend class HashTableHost;
   typedef HashTableHost<const Symbol*, traceid, ListEntry, JfrSymbolId> SymbolTable;
   typedef HashTableHost<const char*, traceid, ListEntry, JfrSymbolId> CStringTable;
+  friend class JfrArtifactSet;
  public:
   typedef SymbolTable::HashEntry SymbolEntry;
   typedef CStringTable::HashEntry CStringEntry;
@@ -172,11 +251,11 @@
   bool _class_unload;
 
   // hashtable(s) callbacks
-  void assign_id(const SymbolEntry* entry);
-  bool equals(const Symbol* query, uintptr_t hash, const SymbolEntry* entry);
+  void link(const SymbolEntry* entry);
+  bool equals(uintptr_t hash, const SymbolEntry* entry);
   void unlink(const SymbolEntry* entry);
-  void assign_id(const CStringEntry* entry);
-  bool equals(const char* query, uintptr_t hash, const CStringEntry* entry);
+  void link(const CStringEntry* entry);
+  bool equals(uintptr_t hash, const CStringEntry* entry);
   void unlink(const CStringEntry* entry);
 
   template <typename Functor, typename T>
@@ -189,50 +268,21 @@
     }
   }
 
+  traceid mark_unsafe_anonymous_klass_name(const InstanceKlass* k, bool leakp);
+  bool is_unsafe_anonymous_klass(const Klass* k);
+  uintptr_t unsafe_anonymous_klass_name_hash(const InstanceKlass* ik);
+
  public:
-  static bool is_unsafe_anonymous_klass(const Klass* k);
-  static const char* create_unsafe_anonymous_klass_symbol(const InstanceKlass* ik, uintptr_t& hashcode);
-  static uintptr_t unsafe_anonymous_klass_name_hash_code(const InstanceKlass* ik);
-  static uintptr_t regular_klass_name_hash_code(const Klass* k);
-
   JfrSymbolId();
   ~JfrSymbolId();
 
   void clear();
   void set_class_unload(bool class_unload);
 
-  traceid mark_unsafe_anonymous_klass_name(const Klass* k);
-  traceid mark(const Symbol* sym, uintptr_t hash);
-  traceid mark(const Klass* k);
-  traceid mark(const Symbol* symbol);
-  traceid mark(const char* str, uintptr_t hash);
-
-  const SymbolEntry* map_symbol(const Symbol* symbol) const;
-  const SymbolEntry* map_symbol(uintptr_t hash) const;
-  const CStringEntry* map_cstring(uintptr_t hash) const;
-
-  template <typename Functor>
-  void symbol(Functor& functor, const Klass* k) {
-    if (is_unsafe_anonymous_klass(k)) {
-      return;
-    }
-    functor(map_symbol(regular_klass_name_hash_code(k)));
-  }
-
-  template <typename Functor>
-  void symbol(Functor& functor, const Method* method) {
-    assert(method != NULL, "invariant");
-    functor(map_symbol((uintptr_t)method->name()->identity_hash()));
-    functor(map_symbol((uintptr_t)method->signature()->identity_hash()));
-  }
-
-  template <typename Functor>
-  void cstring(Functor& functor, const Klass* k) {
-    if (!is_unsafe_anonymous_klass(k)) {
-      return;
-    }
-    functor(map_cstring(unsafe_anonymous_klass_name_hash_code((const InstanceKlass*)k)));
-  }
+  traceid mark(uintptr_t hash, const Symbol* sym, bool leakp);
+  traceid mark(const Klass* k, bool leakp);
+  traceid mark(const Symbol* symbol, bool leakp);
+  traceid mark(uintptr_t hash, const char* str, bool leakp);
 
   template <typename Functor>
   void iterate_symbols(Functor& functor) {
@@ -278,11 +328,11 @@
   void clear();
 
 
-  traceid mark(const Symbol* sym, uintptr_t hash);
-  traceid mark(const Klass* klass);
-  traceid mark(const Symbol* symbol);
-  traceid mark(const char* const str, uintptr_t hash);
-  traceid mark_unsafe_anonymous_klass_name(const Klass* klass);
+  traceid mark(uintptr_t hash, const Symbol* sym, bool leakp);
+  traceid mark(const Klass* klass, bool leakp);
+  traceid mark(const Symbol* symbol, bool leakp);
+  traceid mark(uintptr_t hash, const char* const str, bool leakp);
+  traceid mark_unsafe_anonymous_klass_name(const Klass* klass, bool leakp);
 
   const JfrSymbolId::SymbolEntry* map_symbol(const Symbol* symbol) const;
   const JfrSymbolId::SymbolEntry* map_symbol(uintptr_t hash) const;
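
SymbolPredicate, MethodUsedPredicate and MethodFlagPredicate are now templated on a bool leakp parameter, so the same writer hosts can be instantiated for either the regular subset or the leak-profiler subset with the choice resolved at compile time. A simplified sketch of that idea; Entry and Predicate are illustrative, and the class-unload case is omitted.

#include <cstdio>

struct Entry {
  bool serialized;
  bool leakp;
};

// Compile-time selection of the filtering policy; the real predicates also
// consult the class-unload state, which is omitted here.
template <bool leakp>
struct Predicate {
  bool operator()(Entry const& e) const {
    return leakp ? e.leakp : !e.serialized;
  }
};

template <typename Filter>
static int count_matching(const Entry* entries, int len, Filter filter) {
  int n = 0;
  for (int i = 0; i < len; ++i) {
    if (filter(entries[i])) {
      ++n;
    }
  }
  return n;
}

int main() {
  const Entry entries[] = { { false, true }, { true, false }, { false, false } };
  std::printf("regular=%d leakp=%d\n",
              count_matching(entries, 3, Predicate<false>()),
              count_matching(entries, 3, Predicate<true>()));
  return 0;
}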
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -98,6 +98,9 @@
   static traceid use(const PackageEntry* package);
   static traceid use(const ClassLoaderData* cld);
 
+  // leak profiler
+  static void set_leakp(const Klass* klass, const Method* method);
+
   static void remove(const Klass* klass);
   static void restore(const Klass* klass);
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -61,7 +61,7 @@
 inline traceid JfrTraceId::use(const Klass* klass) {
   assert(klass != NULL, "invariant");
   if (SHOULD_TAG(klass)) {
-    JfrTraceIdEpoch::set_klass_tagged_in_epoch();
+    JfrTraceIdEpoch::set_changed_tag_state();
     return set_used_and_get(klass);
   }
   assert(USED_THIS_EPOCH(klass), "invariant");
@@ -76,12 +76,20 @@
 inline traceid JfrTraceId::use(const Klass* klass, const Method* method) {
   assert(klass != NULL, "invariant");
   assert(method != NULL, "invariant");
-  SET_METHOD_FLAG_USED_THIS_EPOCH(method);
+  bool changed_tag_state = false;
   if (SHOULD_TAG_KLASS_METHOD(klass)) {
-    JfrTraceIdEpoch::set_klass_tagged_in_epoch();
     SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
+    changed_tag_state = true;
   }
   assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
+  if (METHOD_FLAG_NOT_USED_THIS_EPOCH(method)) {
+    SET_METHOD_FLAG_USED_THIS_EPOCH(method);
+    changed_tag_state = true;
+  }
+  assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
+  if (changed_tag_state) {
+    JfrTraceIdEpoch::set_changed_tag_state();
+  }
   return (METHOD_ID(klass, method));
 }
 
@@ -100,6 +108,13 @@
   return cld->is_unsafe_anonymous() ? 0 : set_used_and_get(cld);
 }
 
+inline void JfrTraceId::set_leakp(const Klass* klass, const Method* method) {
+  assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
+  assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
+  SET_LEAKP(klass);
+  SET_METHOD_LEAKP(method);
+}
+
 inline bool JfrTraceId::in_visible_set(const Klass* klass) {
   assert(klass != NULL, "invariant");
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -31,7 +31,7 @@
 // The regular epoch shift happens only during a safepoint.
 // The fence is there only for the emergency dump case which happens outside of safepoint.
 bool JfrTraceIdEpoch::_epoch_state = false;
-bool volatile JfrTraceIdEpoch::_klass_tagged_in_epoch = false;
+bool volatile JfrTraceIdEpoch::_tag_state = false;
 
 void JfrTraceIdEpoch::shift_epoch() {
   _epoch_state = !_epoch_state;
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -44,7 +44,7 @@
   friend class JfrCheckpointManager;
  private:
   static bool _epoch_state;
-  static bool volatile _klass_tagged_in_epoch;
+  static bool volatile _tag_state;
 
   static void shift_epoch();
 
@@ -89,17 +89,17 @@
     return _epoch_state ? METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS :  METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS;
   }
 
-  static bool is_klass_tagged_in_epoch() {
-    if (OrderAccess::load_acquire(&_klass_tagged_in_epoch)) {
-      OrderAccess::release_store(&_klass_tagged_in_epoch, false);
+  static bool has_changed_tag_state() {
+    if (OrderAccess::load_acquire(&_tag_state)) {
+      OrderAccess::release_store(&_tag_state, false);
       return true;
     }
     return false;
   }
 
-  static void set_klass_tagged_in_epoch() {
-    if (!OrderAccess::load_acquire(&_klass_tagged_in_epoch)) {
-      OrderAccess::release_store(&_klass_tagged_in_epoch, true);
+  static void set_changed_tag_state() {
+    if (!OrderAccess::load_acquire(&_tag_state)) {
+      OrderAccess::release_store(&_tag_state, true);
     }
   }
 };
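
has_changed_tag_state()/set_changed_tag_state() implement a consume-on-read dirty flag with acquire/release ordering: the reader clears the flag when it observes it set, so one notification triggers at most one type-set flush. A sketch of the same protocol using std::atomic in place of HotSpot's OrderAccess:

#include <atomic>
#include <cstdio>

static std::atomic<bool> tag_state(false);

// Writers flip the flag once; redundant notifications skip the store.
static void set_changed_tag_state() {
  if (!tag_state.load(std::memory_order_acquire)) {
    tag_state.store(true, std::memory_order_release);
  }
}

// The reader consumes (clears) the flag when it observes it set.
static bool has_changed_tag_state() {
  if (tag_state.load(std::memory_order_acquire)) {
    tag_state.store(false, std::memory_order_release);
    return true;
  }
  return false;
}

int main() {
  set_changed_tag_state();
  set_changed_tag_state();                      // no-op: already set
  const bool first  = has_changed_tag_state();  // true, consumes the flag
  const bool second = has_changed_tag_state();  // false
  std::printf("%d %d\n", (int)first, (int)second);
  return 0;
}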
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -44,15 +44,19 @@
 
 // static bits
 #define META_SHIFT                                8
-#define SERIALIZED_BIT                            ((USED_BIT << 1) << META_SHIFT)
-#define TRANSIENT_BIT                             (USED_BIT << META_SHIFT)
+#define LEAKP_META_BIT                            USED_BIT
+#define LEAKP_BIT                                 (LEAKP_META_BIT << META_SHIFT)
+#define TRANSIENT_META_BIT                        (USED_BIT << 1)
+#define TRANSIENT_BIT                             (TRANSIENT_META_BIT << META_SHIFT)
+#define SERIALIZED_META_BIT                       (USED_BIT << 2)
+#define SERIALIZED_BIT                            (SERIALIZED_META_BIT << META_SHIFT)
 #define TRACE_ID_SHIFT                            16
 #define METHOD_ID_NUM_MASK                        ((1 << TRACE_ID_SHIFT) - 1)
-#define META_BITS                                 (SERIALIZED_BIT | TRANSIENT_BIT)
+#define META_BITS                                 (SERIALIZED_BIT | TRANSIENT_BIT | LEAKP_BIT)
 #define EVENT_BITS                                (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
 #define USED_BITS                                 (METHOD_USED_EPOCH_2_BIT | METHOD_USED_EPOCH_1_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)
 #define ALL_BITS                                  (META_BITS | EVENT_BITS | USED_BITS)
-#define ALL_BITS_MASK                             (~ALL_BITS)
+#define ALL_BITS_MASK                             (~(ALL_BITS))
 
 // epoch relative bits
 #define IN_USE_THIS_EPOCH_BIT                     (JfrTraceIdEpoch::in_use_this_epoch_bit())
@@ -83,18 +87,19 @@
 
 // predicates
 #define USED_THIS_EPOCH(ptr)                      (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_THIS_EPOCH_BIT)))
-#define NOT_USED_THIS_EPOCH(ptr)                  (!USED_THIS_EPOCH(ptr))
+#define NOT_USED_THIS_EPOCH(ptr)                  (!(USED_THIS_EPOCH(ptr)))
 #define USED_PREV_EPOCH(ptr)                      (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_PREV_EPOCH_BIT)))
 #define USED_ANY_EPOCH(ptr)                       (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)))
-#define METHOD_USED_THIS_EPOCH(kls)               (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)))
-#define METHOD_NOT_USED_THIS_EPOCH(kls)           (!METHOD_USED_THIS_EPOCH(kls))
-#define METHOD_USED_PREV_EPOCH(kls)               (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_IN_USE_PREV_EPOCH_BIT)))
-#define METHOD_USED_ANY_EPOCH(kls)                (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)))
-#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls)     (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS)))
-#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls)     (TRACE_ID_PREDICATE(kls, (TRANSIENT_BIT | METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS)))
+#define METHOD_USED_THIS_EPOCH(kls)               (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_NOT_USED_THIS_EPOCH(kls)           (!(METHOD_USED_THIS_EPOCH(kls)))
+#define METHOD_USED_PREV_EPOCH(kls)               (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_PREV_EPOCH_BIT)))
+#define METHOD_USED_ANY_EPOCH(kls)                (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls)     (TRACE_ID_PREDICATE(kls, (METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS)))
+#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls)     (TRACE_ID_PREDICATE(kls, (METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS)))
 #define METHOD_AND_CLASS_USED_ANY_EPOCH(kls)      (METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls))
-#define METHOD_FLAG_USED_THIS_EPOCH(method)       (METHOD_FLAG_PREDICATE(method, (TRANSIENT_BIT | METHOD_FLAG_IN_USE_THIS_EPOCH_BIT)))
-#define METHOD_FLAG_USED_PREV_EPOCH(method)       (METHOD_FLAG_PREDICATE(method, (TRANSIENT_BIT | METHOD_FLAG_IN_USE_PREV_EPOCH_BIT)))
+#define METHOD_FLAG_USED_THIS_EPOCH(method)       (METHOD_FLAG_PREDICATE(method, (METHOD_FLAG_IN_USE_THIS_EPOCH_BIT)))
+#define METHOD_FLAG_NOT_USED_THIS_EPOCH(method)   (!(METHOD_FLAG_USED_THIS_EPOCH(method)))
+#define METHOD_FLAG_USED_PREV_EPOCH(method)       (METHOD_FLAG_PREDICATE(method, (METHOD_FLAG_IN_USE_PREV_EPOCH_BIT)))
 
 // setters
 #define SET_USED_THIS_EPOCH(ptr)                  (TRACE_ID_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
@@ -110,7 +115,7 @@
 // types
 #define IS_JDK_JFR_EVENT_KLASS(kls)               (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_KLASS))
 #define IS_JDK_JFR_EVENT_SUBKLASS(kls)            (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_SUBKLASS))
-#define IS_NOT_AN_EVENT_SUB_KLASS(kls)            (!IS_JDK_JFR_EVENT_SUBKLASS(kls))
+#define IS_NOT_AN_EVENT_SUB_KLASS(kls)            (!(IS_JDK_JFR_EVENT_SUBKLASS(kls)))
 #define IS_EVENT_HOST_KLASS(kls)                  (TRACE_ID_PREDICATE(kls, EVENT_HOST_KLASS))
 #define SET_JDK_JFR_EVENT_KLASS(kls)              (TRACE_ID_TAG(kls, JDK_JFR_EVENT_KLASS))
 #define SET_JDK_JFR_EVENT_SUBKLASS(kls)           (TRACE_ID_TAG(kls, JDK_JFR_EVENT_SUBKLASS))
@@ -118,17 +123,22 @@
 #define EVENT_KLASS_MASK(kls)                     (TRACE_ID_RAW(kls) & EVENT_BITS)
 
 // meta
-#define SET_TRANSIENT(ptr)                        (TRACE_ID_META_TAG(ptr, USED_BIT))
+#define SET_LEAKP(ptr)                            (TRACE_ID_META_TAG(ptr, LEAKP_META_BIT))
+#define IS_LEAKP(ptr)                             (TRACE_ID_PREDICATE(ptr, LEAKP_BIT))
+#define SET_TRANSIENT(ptr)                        (TRACE_ID_META_TAG(ptr, TRANSIENT_META_BIT))
 #define IS_SERIALIZED(ptr)                        (TRACE_ID_PREDICATE(ptr, SERIALIZED_BIT))
-#define IS_NOT_SERIALIZED(ptr)                    (!IS_SERIALIZED(ptr))
-#define SHOULD_TAG(ptr)                           ((IS_NOT_SERIALIZED(ptr)) || (NOT_USED_THIS_EPOCH(ptr)))
-#define SHOULD_TAG_KLASS_METHOD(ptr)              ((IS_NOT_SERIALIZED(ptr)) || (METHOD_NOT_USED_THIS_EPOCH(ptr)))
-#define SET_SERIALIZED(ptr)                       (TRACE_ID_META_TAG(ptr, (USED_BIT << 1)))
-#define CLEAR_SERIALIZED(ptr)                     (TRACE_ID_META_CLEAR(ptr, (~(USED_BIT << 1 | USED_BIT))))
+#define IS_NOT_SERIALIZED(ptr)                    (!(IS_SERIALIZED(ptr)))
+#define SHOULD_TAG(ptr)                           (NOT_USED_THIS_EPOCH(ptr))
+#define SHOULD_TAG_KLASS_METHOD(ptr)              (METHOD_NOT_USED_THIS_EPOCH(ptr))
+#define SET_SERIALIZED(ptr)                       (TRACE_ID_META_TAG(ptr, SERIALIZED_META_BIT))
+#define CLEAR_SERIALIZED(ptr)                     (TRACE_ID_META_CLEAR(ptr, (~(SERIALIZED_META_BIT | TRANSIENT_META_BIT | LEAKP_META_BIT))))
 #define IS_METHOD_SERIALIZED(method)              (METHOD_FLAG_PREDICATE(method, SERIALIZED_BIT))
-#define METHOD_NOT_SERIALIZED(method)             (!IS_METHOD_SERIALIZED(method))
-#define SET_METHOD_TRANSIENT(method)              (METHOD_META_TAG(method, USED_BIT))
-#define SET_METHOD_SERIALIZED(method)             (METHOD_META_TAG(method, (USED_BIT << 1)))
-#define CLEAR_METHOD_SERIALIZED(method)           (METHOD_META_CLEAR(method, (USED_BIT << 1 | USED_BIT)))
+#define IS_METHOD_LEAKP_USED(method)              (METHOD_FLAG_PREDICATE(method, LEAKP_BIT))
+#define METHOD_NOT_SERIALIZED(method)             (!(IS_METHOD_SERIALIZED(method)))
+#define SET_METHOD_LEAKP(method)                  (METHOD_META_TAG(method, LEAKP_META_BIT))
+#define SET_METHOD_SERIALIZED(method)             (METHOD_META_TAG(method, SERIALIZED_META_BIT))
+#define CLEAR_METHOD_LEAKP(method)                (METHOD_META_CLEAR(method, (LEAKP_META_BIT)))
+#define CLEAR_METHOD_SERIALIZED(method)           (METHOD_META_CLEAR(method, (SERIALIZED_META_BIT | LEAKP_META_BIT)))
+#define CLEAR_LEAKP(ptr)                          (TRACE_ID_META_CLEAR(ptr, (~(LEAKP_META_BIT))))
 
 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
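
The reworked predicates above are plain bit tests against one flag word: every expansion is now fully parenthesized so negation and composition stay well-formed, and the ad hoc USED_BIT shifts are replaced by dedicated *_META_BIT constants. A minimal standalone sketch of that pattern (the SKETCH_* names and bit positions are illustrative, not the real JFR constants):

    // Standalone sketch of the parenthesized bit-test pattern used by the
    // macros above; the SKETCH_* names and bit positions are illustrative,
    // not the real JFR trace-id constants.
    #include <cassert>
    #include <cstdint>

    typedef uint16_t flag_word;

    #define SKETCH_USED_THIS_EPOCH_BIT  (1 << 0)   // tag bits in the low byte
    #define SKETCH_TRANSIENT_META_BIT   (1 << 8)   // meta bits in the high byte
    #define SKETCH_SERIALIZED_META_BIT  (1 << 9)
    #define SKETCH_LEAKP_META_BIT       (1 << 10)

    #define SKETCH_PREDICATE(w, bits)   (((w) & (bits)) != 0)
    #define SKETCH_TAG(w, bits)         ((w) |= (bits))
    #define SKETCH_META_CLEAR(w, mask)  ((w) &= (mask))

    #define SKETCH_IS_SERIALIZED(w)     (SKETCH_PREDICATE(w, SKETCH_SERIALIZED_META_BIT))
    #define SKETCH_IS_NOT_SERIALIZED(w) (!(SKETCH_IS_SERIALIZED(w)))
    #define SKETCH_SHOULD_TAG(w)        (!(SKETCH_PREDICATE(w, SKETCH_USED_THIS_EPOCH_BIT)))

    int main() {
      flag_word w = 0;
      assert(SKETCH_SHOULD_TAG(w));
      SKETCH_TAG(w, SKETCH_USED_THIS_EPOCH_BIT | SKETCH_SERIALIZED_META_BIT);
      assert(!SKETCH_SHOULD_TAG(w) && SKETCH_IS_SERIALIZED(w));
      // keep everything except the listed meta bits, in the spirit of CLEAR_SERIALIZED above
      SKETCH_META_CLEAR(w, (flag_word)~(SKETCH_SERIALIZED_META_BIT | SKETCH_TRANSIENT_META_BIT | SKETCH_LEAKP_META_BIT));
      assert(SKETCH_IS_NOT_SERIALIZED(w));
      return 0;
    }
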
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -39,7 +39,7 @@
   _start_nanos(0),
   _previous_start_ticks(0),
   _previous_start_nanos(0),
-  _previous_checkpoint_offset(0) {}
+  _last_checkpoint_offset(0) {}
 
 JfrChunkState::~JfrChunkState() {
   reset();
@@ -50,15 +50,15 @@
     JfrCHeapObj::free(_path, strlen(_path) + 1);
     _path = NULL;
   }
-  set_previous_checkpoint_offset(0);
+  set_last_checkpoint_offset(0);
 }
 
-void JfrChunkState::set_previous_checkpoint_offset(int64_t offset) {
-  _previous_checkpoint_offset = offset;
+void JfrChunkState::set_last_checkpoint_offset(int64_t offset) {
+  _last_checkpoint_offset = offset;
 }
 
-int64_t JfrChunkState::previous_checkpoint_offset() const {
-  return _previous_checkpoint_offset;
+int64_t JfrChunkState::last_checkpoint_offset() const {
+  return _last_checkpoint_offset;
 }
 
 int64_t JfrChunkState::previous_start_ticks() const {
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkState.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkState.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -36,7 +36,7 @@
   int64_t _start_nanos;
   int64_t _previous_start_ticks;
   int64_t _previous_start_nanos;
-  int64_t _previous_checkpoint_offset;
+  int64_t _last_checkpoint_offset;
 
   void update_start_ticks();
   void update_start_nanos();
@@ -46,8 +46,8 @@
   JfrChunkState();
   ~JfrChunkState();
   void reset();
-  int64_t previous_checkpoint_offset() const;
-  void set_previous_checkpoint_offset(int64_t offset);
+  int64_t last_checkpoint_offset() const;
+  void set_last_checkpoint_offset(int64_t offset);
   int64_t previous_start_ticks() const;
   int64_t previous_start_nanos() const;
   int64_t last_chunk_duration() const;
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -172,13 +172,13 @@
 
 static void write_checkpoint_header(JfrChunkWriter& cw, int64_t event_offset, bool flushpoint) {
   const int64_t delta = cw.last_checkpoint_offset() == 0 ? 0 : cw.last_checkpoint_offset() - event_offset;
-  const u1 checkpoint_type = flushpoint ? FLUSH | HEADER : HEADER;
+  const u4 checkpoint_type = flushpoint ? (u4)(FLUSH | HEADER) : (u4)HEADER;
   cw.reserve(sizeof(u4));
   cw.write<u8>(EVENT_CHECKPOINT);
   cw.write<u8>(JfrTicks::now().value());
   cw.write<u8>(0); // duration
   cw.write<u8>(delta); // to previous checkpoint
-  cw.write<u1>(checkpoint_type);
+  cw.write<u4>(checkpoint_type);
   cw.write<u4>(1); // pool count
   cw.write<u8>(TYPE_CHUNKHEADER);
   cw.write<u4>(1); // count
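
In the checkpoint header above, the checkpoint type field is widened from u1 to u4. A compact sketch of the field order as written after this change; ByteSink, the constant values, and the raw host-endian writes are stand-ins, not the JFR chunk writer or its encoding:

    // Field order of the checkpoint header, sketched with stand-in types; only
    // the order and widths mirror write_checkpoint_header, everything else is
    // illustrative.
    #include <cstdint>
    #include <cstring>
    #include <vector>

    typedef uint8_t  u1;
    typedef uint32_t u4;
    typedef uint64_t u8;

    struct ByteSink {
      std::vector<u1> buf;
      template <typename T> void write(T v) {
        const size_t pos = buf.size();
        buf.resize(pos + sizeof(T));
        memcpy(&buf[pos], &v, sizeof(T));       // raw host-endian demo only
      }
      void reserve(size_t n) { buf.resize(buf.size() + n); }  // room for the total size
    };

    static const u8 EVENT_CHECKPOINT = 1;       // illustrative ids/flags
    static const u4 HEADER = 1 << 1;
    static const u4 FLUSH  = 1 << 2;

    static void write_checkpoint_header_sketch(ByteSink& cw, u8 now_ticks, u8 delta, bool flushpoint) {
      cw.reserve(sizeof(u4));                   // size is patched in afterwards
      cw.write<u8>(EVENT_CHECKPOINT);
      cw.write<u8>(now_ticks);
      cw.write<u8>(0);                          // duration
      cw.write<u8>(delta);                      // offset back to the previous checkpoint
      cw.write<u4>(flushpoint ? (FLUSH | HEADER) : HEADER);  // widened from u1 to u4
      cw.write<u4>(1);                          // pool count
    }

    int main() {
      ByteSink cw;
      write_checkpoint_header_sketch(cw, 0, 0, true);
      // 4 reserved bytes + 4 * sizeof(u8) + 2 * sizeof(u4) = 44 bytes in this raw layout
      return cw.buf.size() == 44 ? 0 : 1;
    }
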
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -39,11 +39,11 @@
   }
 }
 
-JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, InstanceKlass* k) :
-  _klass(k), _methodid(id), _line(0), _bci(bci), _type(type) {}
+JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, const Method* method) :
+  _method(method), _methodid(id), _line(0), _bci(bci), _type(type) {}
 
 JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno) :
-  _klass(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
+  _method(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
 
 JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
   _next(NULL),
@@ -77,26 +77,6 @@
   }
 }
 
-void JfrStackTrace::operator=(const JfrStackTrace& trace) {
-  assert(_next == NULL, "invariant");
-  assert(_frames_ownership, "invariant");
-
-  if (_id == trace._id) {
-    assert(_hash == trace._hash, "invariant");
-    assert(_nr_of_frames == trace._nr_of_frames, "invariant");
-    return;
-  }
-  _next = NULL;
-  _id = trace._id;
-  _hash = trace._hash;
-  _nr_of_frames = trace._nr_of_frames;
-  _max_frames = trace._max_frames;
-  _reached_root = trace._reached_root;
-  _lineno = trace._lineno;
-  _written = false;
-  copy_frames(&_frames, trace._nr_of_frames, trace._frames);
-}
-
 template <typename Writer>
 static void write_stacktrace(Writer& w, traceid id, bool reached_root, u4 nr_of_frames, const JfrStackFrame* frames) {
   w.write((u8)id);
@@ -220,7 +200,7 @@
     const int lineno = method->line_number_from_bci(bci);
     // Can we determine if it's inlined?
     _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
-    _frames[count] = JfrStackFrame(mid, bci, type, method->constants()->pool_holder());
+    _frames[count] = JfrStackFrame(mid, bci, type, method);
     st.samples_next();
     count++;
   }
@@ -231,12 +211,9 @@
 }
 
 void JfrStackFrame::resolve_lineno() const {
-  assert(_klass, "no klass pointer");
+  assert(_method, "no method pointer");
   assert(_line == 0, "already have linenumber");
-  const int id_num = _methodid & METHOD_ID_NUM_MASK;
-  const Method* const method = _klass->method_with_orig_idnum(id_num);
-  assert(method != NULL, "invariant");
-  _line = method->line_number_from_bci(_bci);
+  _line = _method->line_number_from_bci(_bci);
 }
 
 void JfrStackTrace::resolve_linenos() const {
@@ -275,7 +252,7 @@
     }
     // Can we determine if it's inlined?
     _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
-    _frames[count] = JfrStackFrame(mid, bci, type, method->constants()->pool_holder());
+    _frames[count] = JfrStackFrame(mid, bci, type, method);
     vfs.next();
     count++;
   }
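
With the Method pointer stored directly in each frame, the line number can be resolved lazily with a single call, instead of recovering the method from its holder klass and original idnum. A small sketch of that shape, with MethodStub standing in for the VM's Method:

    // MethodStub and its line table are stand-ins; only the lazy, single-call
    // line resolution mirrors the simplified JfrStackFrame::resolve_lineno.
    #include <cassert>
    #include <cstddef>

    struct MethodStub {
      int line_number_from_bci(int bci) const { return 100 + bci; }  // fake line table
    };

    class FrameSketch {
     private:
      const MethodStub* _method;
      mutable int _line;   // 0 until resolved
      int _bci;
     public:
      FrameSketch(const MethodStub* m, int bci) : _method(m), _line(0), _bci(bci) {}
      void resolve_lineno() const {
        assert(_method != NULL && _line == 0);
        _line = _method->line_number_from_bci(_bci);   // no klass/idnum round trip
      }
      int line() const { return _line; }
    };

    int main() {
      MethodStub m;
      FrameSketch frame(&m, 7);
      frame.resolve_lineno();
      return frame.line() == 107 ? 0 : 1;
    }
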
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -29,7 +29,6 @@
 #include "jfr/utilities/jfrTypes.hpp"
 
 class frame;
-class InstanceKlass;
 class JavaThread;
 class JfrCheckpointWriter;
 class JfrChunkWriter;
@@ -38,14 +37,14 @@
 class JfrStackFrame {
   friend class ObjectSampleCheckpoint;
  private:
-  mutable InstanceKlass* _klass;
+  const Method* _method;
   traceid _methodid;
   mutable int _line;
   int _bci;
   u1 _type;
 
  public:
-  JfrStackFrame(const traceid& id, int bci, int type, InstanceKlass* klass);
+  JfrStackFrame(const traceid& id, int bci, int type, const Method* method);
   JfrStackFrame(const traceid& id, int bci, int type, int lineno);
 
   bool equals(const JfrStackFrame& rhs) const;
@@ -69,7 +68,6 @@
   friend class ObjectSampler;
   friend class OSThreadSampler;
   friend class StackTraceResolver;
-
  private:
   const JfrStackTrace* _next;
   JfrStackFrame* _frames;
@@ -102,14 +100,12 @@
   bool full_stacktrace() const { return _reached_root; }
 
   JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next);
-  void operator=(const JfrStackTrace& trace);
+  JfrStackTrace(JfrStackFrame* frames, u4 max_frames);
+  ~JfrStackTrace();
 
  public:
-  JfrStackTrace(JfrStackFrame* frames, u4 max_frames);
-  ~JfrStackTrace();
   unsigned int hash() const { return _hash; }
   traceid id() const { return _id; }
-  u4 number_of_frames() const { return _nr_of_frames; }
 };
 
 #endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -184,8 +184,10 @@
   assert(!tl->has_cached_stack_trace(), "invariant");
   JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
   stacktrace.record_safe(thread, skip);
-  assert(stacktrace.hash() != 0, "invariant");
-  tl->set_cached_stack_trace_id(instance().add(stacktrace), stacktrace.hash());
+  const unsigned int hash = stacktrace.hash();
+  if (hash != 0) {
+    tl->set_cached_stack_trace_id(instance().add(stacktrace), hash);
+  }
 }
 
 traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -40,7 +40,7 @@
   friend class JfrThreadSampleClosure;
   friend class ObjectSampleCheckpoint;
   friend class ObjectSampler;
-  friend class StackTraceResolver;
+  friend class StackTraceBlobInstaller;
 
  private:
   static const u4 TABLE_SIZE = 2053;
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -46,7 +46,7 @@
   _shelved_buffer(NULL),
   _stackframes(NULL),
   _trace_id(JfrTraceId::assign_thread_id()),
-  _thread_cp(),
+  _thread(),
   _data_lost(0),
   _stack_trace_id(max_julong),
   _user_time(0),
@@ -63,17 +63,17 @@
   return _data_lost;
 }
 
-bool JfrThreadLocal::has_thread_checkpoint() const {
-  return _thread_cp.valid();
+bool JfrThreadLocal::has_thread_blob() const {
+  return _thread.valid();
 }
 
-void JfrThreadLocal::set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) {
-  assert(!_thread_cp.valid(), "invariant");
-  _thread_cp = ref;
+void JfrThreadLocal::set_thread_blob(const JfrBlobHandle& ref) {
+  assert(!_thread.valid(), "invariant");
+  _thread = ref;
 }
 
-const JfrCheckpointBlobHandle& JfrThreadLocal::thread_checkpoint() const {
-  return _thread_cp;
+const JfrBlobHandle& JfrThreadLocal::thread_blob() const {
+  return _thread;
 }
 
 static void send_java_thread_start_event(JavaThread* jt) {
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -25,7 +25,7 @@
 #ifndef SHARE_JFR_SUPPORT_JFRTHREADLOCAL_HPP
 #define SHARE_JFR_SUPPORT_JFRTHREADLOCAL_HPP
 
-#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 
 class JavaThread;
@@ -41,7 +41,7 @@
   JfrBuffer* _shelved_buffer;
   mutable JfrStackFrame* _stackframes;
   mutable traceid _trace_id;
-  JfrCheckpointBlobHandle _thread_cp;
+  JfrBlobHandle _thread;
   u8 _data_lost;
   traceid _stack_trace_id;
   jlong _user_time;
@@ -212,9 +212,9 @@
     return _dead;
   }
 
-  bool has_thread_checkpoint() const;
-  void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle);
-  const JfrCheckpointBlobHandle& thread_checkpoint() const;
+  bool has_thread_blob() const;
+  void set_thread_blob(const JfrBlobHandle& handle);
+  const JfrBlobHandle& thread_blob() const;
 
   static void on_start(Thread* t);
   static void on_exit(Thread* t);
--- a/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -41,6 +41,31 @@
 #define REMOVE_ID(k) JfrTraceId::remove(k);
 #define RESTORE_ID(k) JfrTraceId::restore(k);
 
+class JfrTraceFlag {
+ private:
+  mutable jshort _flags;
+ public:
+  JfrTraceFlag() : _flags(0) {}
+  bool is_set(jshort flag) const {
+    return (_flags & flag) != 0;
+  }
+
+  jshort flags() const {
+    return _flags;
+  }
+
+  void set_flags(jshort flags) const {
+    _flags = flags;
+  }
+
+  jbyte* flags_addr() const {
+    return (jbyte*)&_flags;
+  }
+  jbyte* meta_addr() const {
+    return ((jbyte*)&_flags) + 1;
+  }
+};
+
 #define DEFINE_TRACE_FLAG mutable JfrTraceFlag _trace_flags
 
 #define DEFINE_TRACE_FLAG_ACCESSOR                 \
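
JfrTraceFlag above keeps the tag bits and the meta bits in a single 16-bit word while still exposing each byte by address, so either half can be read or written on its own. A standalone sketch of that two-byte view; the byte offsets assume a little-endian layout and the concrete bit value is arbitrary:

    // Two-byte view of a 16-bit flag word; offsets assume little-endian and
    // the bit meanings are illustrative, not the actual JFR assignments.
    #include <cassert>
    #include <cstdint>

    class TraceFlagSketch {
     private:
      mutable int16_t _flags;
     public:
      TraceFlagSketch() : _flags(0) {}
      bool is_set(int16_t flag) const { return (_flags & flag) != 0; }
      int8_t* flags_addr() const { return (int8_t*)&_flags; }        // byte 0
      int8_t* meta_addr()  const { return ((int8_t*)&_flags) + 1; }  // byte 1
    };

    int main() {
      TraceFlagSketch f;
      *f.meta_addr() = 0x04;                    // poke one byte directly
      // on little-endian, byte 1 holds bits 8..15 of the word
      assert(f.is_set((int16_t)(0x04 << 8)));
      return 0;
    }
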
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/utilities/jfrBlob.cpp	Mon Sep 02 19:42:46 2019 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/utilities/jfrBlob.hpp"
+
+JfrBlob::JfrBlob(const u1* checkpoint, size_t size) :
+  _data(JfrCHeapObj::new_array<u1>(size)),
+  _next(),
+  _size(size),
+  _written(false) {
+  assert(_data != NULL, "invariant");
+  memcpy(const_cast<u1*>(_data), checkpoint, size);
+}
+
+JfrBlob::~JfrBlob() {
+  JfrCHeapObj::free(const_cast<u1*>(_data), _size);
+}
+
+void JfrBlob::reset_write_state() const {
+  if (!_written) {
+    return;
+  }
+  _written = false;
+  if (_next.valid()) {
+    _next->reset_write_state();
+  }
+}
+
+void JfrBlob::set_next(const JfrBlobHandle& ref) {
+  if (_next == ref) {
+    return;
+  }
+  assert(_next != ref, "invariant");
+  if (_next.valid()) {
+    _next->set_next(ref);
+    return;
+  }
+  _next = ref;
+}
+
+JfrBlobHandle JfrBlob::make(const u1* data, size_t size) {
+  const JfrBlob* const blob = new JfrBlob(data, size);
+  assert(blob != NULL, "invariant");
+  return JfrBlobReference::make(blob);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/utilities/jfrBlob.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_UTILITIES_JFRBLOB_HPP
+#define SHARE_JFR_UTILITIES_JFRBLOB_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrRefCountPointer.hpp"
+
+class JfrBlob;
+typedef RefCountPointer<JfrBlob, MultiThreadedRefCounter> JfrBlobReference;
+typedef RefCountHandle<JfrBlobReference> JfrBlobHandle;
+
+class JfrBlob : public JfrCHeapObj {
+  template <typename, typename>
+  friend class RefCountPointer;
+ private:
+  const u1* const _data;
+  JfrBlobHandle _next;
+  const size_t _size;
+  mutable bool _written;
+
+  JfrBlob(const u1* data, size_t size);
+  ~JfrBlob();
+
+ public:
+  void set_next(const JfrBlobHandle& ref);
+  void reset_write_state() const;
+  static JfrBlobHandle make(const u1* data, size_t size);
+  template <typename Writer>
+  void write(Writer& writer) const {
+    writer.bytes(_data, _size);
+    if (_next.valid()) {
+      _next->write(writer);
+    }
+  }
+  template <typename Writer>
+  void exclusive_write(Writer& writer) const {
+    if (_written) {
+      return;
+    }
+    writer.bytes(_data, _size);
+    _written = true;
+    if (_next.valid()) {
+      _next->exclusive_write(writer);
+    }
+  }
+};
+
+#endif // SHARE_JFR_UTILITIES_JFRBLOB_HPP
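
The new JfrBlob is an immutable, reference-counted byte range: blobs are chained through set_next() (which appends at the tail), write() always emits the whole chain, and exclusive_write() emits a chain at most once until reset_write_state() clears the written marks. A standalone sketch of those semantics, with std::shared_ptr standing in for JfrBlobHandle and a hypothetical CountingWriter as the sink:

    // std::shared_ptr stands in for the RefCountHandle-based JfrBlobHandle;
    // only the chaining and write-once semantics mirror the code above.
    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    class BlobSketch;
    typedef std::shared_ptr<BlobSketch> BlobHandleSketch;

    class BlobSketch {
     private:
      std::vector<uint8_t> _data;
      BlobHandleSketch _next;
      mutable bool _written;
     public:
      BlobSketch(const uint8_t* data, size_t size) : _data(data, data + size), _written(false) {}
      static BlobHandleSketch make(const uint8_t* data, size_t size) {
        return BlobHandleSketch(new BlobSketch(data, size));
      }
      void set_next(const BlobHandleSketch& ref) {
        if (_next) { _next->set_next(ref); return; }   // append at the tail
        _next = ref;
      }
      template <typename Writer>
      void exclusive_write(Writer& writer) const {
        if (_written) return;                          // an already-written head stops the walk
        writer.bytes(_data.data(), _data.size());
        _written = true;
        if (_next) _next->exclusive_write(writer);
      }
      void reset_write_state() const {
        if (!_written) return;
        _written = false;
        if (_next) _next->reset_write_state();
      }
    };

    struct CountingWriter {                            // hypothetical sink for the demo
      size_t total;
      CountingWriter() : total(0) {}
      void bytes(const uint8_t*, size_t size) { total += size; }
    };

    int main() {
      const uint8_t payload[4] = { 1, 2, 3, 4 };
      BlobHandleSketch head = BlobSketch::make(payload, sizeof(payload));
      head->set_next(BlobSketch::make(payload, sizeof(payload)));
      CountingWriter w;
      head->exclusive_write(w);                        // both nodes written once: 8 bytes
      head->exclusive_write(w);                        // no-op until reset_write_state()
      return w.total == 8 ? 0 : 1;
    }
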
--- a/src/hotspot/share/jfr/utilities/jfrHashtable.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/utilities/jfrHashtable.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -25,13 +25,13 @@
 #ifndef SHARE_JFR_UTILITIES_JFRHASHTABLE_HPP
 #define SHARE_JFR_UTILITIES_JFRHASHTABLE_HPP
 
-#include "memory/allocation.inline.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
 #include "runtime/orderAccess.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
 template <typename T>
-class JfrBasicHashtableEntry {
+class JfrBasicHashtableEntry : public JfrCHeapObj {
  private:
   typedef JfrBasicHashtableEntry<T> Entry;
   Entry* _next;
@@ -39,8 +39,8 @@
   uintptr_t _hash;
 
  public:
+  JfrBasicHashtableEntry(uintptr_t hash, const T& data) : _next(NULL), _literal(data), _hash(hash) {}
   uintptr_t hash() const { return _hash; }
-  void set_hash(uintptr_t hash) { _hash = hash; }
   T literal() const { return _literal; }
   T* literal_addr() { return &_literal; }
   void set_literal(T s) { _literal = s; }
@@ -110,18 +110,18 @@
 };
 
 template <typename IdType, typename Entry, typename T>
-class AscendingId : public CHeapObj<mtTracing>  {
+class AscendingId : public JfrCHeapObj  {
  private:
   IdType _id;
  public:
   AscendingId() : _id(0) {}
   // callbacks
-  void assign_id(Entry* entry) {
+  void link(Entry* entry) {
     assert(entry != NULL, "invariant");
     assert(entry->id() == 0, "invariant");
     entry->set_id(++_id);
   }
-  bool equals(const T& data, uintptr_t hash, const Entry* entry) {
+  bool equals(uintptr_t hash, const Entry* entry) {
     assert(entry->hash() == hash, "invariant");
     return true;
   }
@@ -129,64 +129,46 @@
 
 // IdType must be scalar
 template <typename T, typename IdType>
-class Entry : public JfrBasicHashtableEntry<T> {
+class JfrHashtableEntry : public JfrBasicHashtableEntry<T> {
  public:
+  JfrHashtableEntry(uintptr_t hash, const T& data) : JfrBasicHashtableEntry<T>(hash, data), _id(0) {}
   typedef IdType ID;
-  void init() { _id = 0; }
   ID id() const { return _id; }
   void set_id(ID id) const { _id = id; }
-  void set_value(const T& value) { this->set_literal(value); }
-  T& value() const { return *const_cast<Entry*>(this)->literal_addr();}
-  const T* value_addr() const { return const_cast<Entry*>(this)->literal_addr(); }
+  T& value() const { return *const_cast<JfrHashtableEntry*>(this)->literal_addr();}
+  const T* value_addr() const { return const_cast<JfrHashtableEntry*>(this)->literal_addr(); }
  private:
   mutable ID _id;
 };
 
-template <typename T, typename IdType>
-class ListEntry : public Entry<T, IdType> {
- public:
-  void init() { Entry<T, IdType>::init(); _list_next = NULL; _serialized = false; _unloading = false; }
-  const ListEntry<T, IdType>* list_next() const { return _list_next; }
-  void set_list_next(const ListEntry<T, IdType>* next) const { _list_next = next; }
-  bool is_serialized() const { return _serialized; }
-  void set_serialized() const { _serialized = true; }
-  bool is_unloading() const { return _unloading; }
-  void set_unloading() const { _unloading = true; }
- private:
-  mutable const ListEntry<T, IdType>* _list_next;
-  mutable bool _serialized;
-  mutable bool _unloading;
-};
-
 template <typename T, typename IdType, template <typename, typename> class Entry,
           typename Callback = AscendingId<IdType, Entry<T, IdType>, T> ,
           size_t TABLE_SIZE = 1009>
 class HashTableHost : public JfrBasicHashtable<T> {
  public:
   typedef Entry<T, IdType> HashEntry;
-  HashTableHost() : _callback(new Callback()) {}
-  HashTableHost(Callback* cb) : JfrBasicHashtable<T>(TABLE_SIZE, sizeof(HashEntry)), _callback(cb) {}
+  HashTableHost(size_t size = 0) : JfrBasicHashtable<T>(size == 0 ? TABLE_SIZE : size, sizeof(HashEntry)), _callback(new Callback()) {}
+  HashTableHost(Callback* cb, size_t size = 0) : JfrBasicHashtable<T>(size == 0 ? TABLE_SIZE : size, sizeof(HashEntry)), _callback(cb) {}
   ~HashTableHost() {
     this->clear_entries();
     this->free_buckets();
   }
 
   // direct insert assumes non-existing entry
-  HashEntry& put(const T& data, uintptr_t hash);
+  HashEntry& put(uintptr_t hash, const T& data);
 
   // lookup entry, will put if not found
-  HashEntry& lookup_put(const T& data, uintptr_t hash) {
-    HashEntry* entry = lookup_only(data, hash);
-    return entry == NULL ? put(data, hash) : *entry;
+  HashEntry& lookup_put(uintptr_t hash, const T& data) {
+    HashEntry* entry = lookup_only(hash);
+    return entry == NULL ? put(hash, data) : *entry;
   }
 
-  // read-only lookup
-  HashEntry* lookup_only(const T& query, uintptr_t hash);
+  HashEntry* lookup_only(uintptr_t hash);
 
   // id retrieval
-  IdType id(const T& data, uintptr_t hash) {
+  IdType id(uintptr_t hash, const T& data) {
     assert(data != NULL, "invariant");
-    const HashEntry& entry = lookup_put(data, hash);
+    const HashEntry& entry = lookup_put(hash, data);
     assert(entry.id() > 0, "invariant");
     return entry.id();
   }
@@ -206,34 +188,34 @@
     assert(entry != NULL, "invariant");
     JfrBasicHashtable<T>::unlink_entry(entry);
     _callback->unlink(entry);
-    FREE_C_HEAP_ARRAY(char, entry);
+    delete entry;
   }
 
  private:
   Callback* _callback;
   size_t index_for(uintptr_t hash) { return this->hash_to_index(hash); }
-  HashEntry* new_entry(const T& data, uintptr_t hash);
+  HashEntry* new_entry(uintptr_t hash, const T& data);
   void add_entry(size_t index, HashEntry* new_entry) {
     assert(new_entry != NULL, "invariant");
-    _callback->assign_id(new_entry);
+    _callback->link(new_entry);
     assert(new_entry->id() > 0, "invariant");
     JfrBasicHashtable<T>::add_entry(index, new_entry);
   }
 };
 
 template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
-Entry<T, IdType>& HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::put(const T& data, uintptr_t hash) {
-  assert(lookup_only(data, hash) == NULL, "use lookup_put()");
-  HashEntry* const entry = new_entry(data, hash);
+Entry<T, IdType>& HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::put(uintptr_t hash, const T& data) {
+  assert(lookup_only(hash) == NULL, "use lookup_put()");
+  HashEntry* const entry = new_entry(hash, data);
   add_entry(index_for(hash), entry);
   return *entry;
 }
 
 template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
-Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::lookup_only(const T& query, uintptr_t hash) {
+Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::lookup_only(uintptr_t hash) {
   HashEntry* entry = (HashEntry*)this->bucket(index_for(hash));
   while (entry != NULL) {
-    if (entry->hash() == hash && _callback->equals(query, hash, entry)) {
+    if (entry->hash() == hash && _callback->equals(hash, entry)) {
       return entry;
     }
     entry = (HashEntry*)entry->next();
@@ -285,13 +267,10 @@
 }
 
 template <typename T, typename IdType, template <typename, typename> class Entry, typename Callback, size_t TABLE_SIZE>
-Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::new_entry(const T& data, uintptr_t hash) {
+Entry<T, IdType>* HashTableHost<T, IdType, Entry, Callback, TABLE_SIZE>::new_entry(uintptr_t hash, const T& data) {
   assert(sizeof(HashEntry) == this->entry_size(), "invariant");
-  HashEntry* const entry = (HashEntry*) NEW_C_HEAP_ARRAY2(char, this->entry_size(), mtTracing, CURRENT_PC);
-  entry->init();
-  entry->set_hash(hash);
-  entry->set_value(data);
-  entry->set_next(NULL);
+  HashEntry* const entry = new HashEntry(hash, data);
+  assert(entry != NULL, "invariant");
   assert(0 == entry->id(), "invariant");
   return entry;
 }
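
The table above is now keyed by the hash value alone: lookup_only/lookup_put take the hash first, the callback's equals() no longer sees the payload, link() (formerly assign_id) hands out ids on first insert, and entries are ordinary heap objects created with new and released with delete. A simplified sketch of that contract; the Sketch types and the unordered_map bucket array are stand-ins for the JFR chained table:

    // Simplified stand-ins; only the hash-keyed lookup, the link()/equals()
    // callback contract and new/delete entry ownership mirror the code above.
    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <unordered_map>

    template <typename T, typename IdType>
    struct EntrySketch {
      uintptr_t hash;
      T value;
      IdType id;
      EntrySketch(uintptr_t h, const T& v) : hash(h), value(v), id(0) {}
    };

    // In the spirit of AscendingId: ids are assigned when an entry is linked,
    // and equality only needs the hash now that the payload is not the key.
    template <typename T, typename IdType>
    struct AscendingIdSketch {
      IdType next_id;
      AscendingIdSketch() : next_id(0) {}
      void link(EntrySketch<T, IdType>* e) { e->id = ++next_id; }
      bool equals(uintptr_t hash, const EntrySketch<T, IdType>* e) const { return e->hash == hash; }
    };

    template <typename T, typename IdType>
    class TableSketch {
     public:
      typedef EntrySketch<T, IdType> Entry;
      ~TableSketch() { for (auto& kv : _buckets) delete kv.second; }
      Entry& lookup_put(uintptr_t hash, const T& data) {
        Entry*& slot = _buckets[hash];
        if (slot == nullptr) {                 // not found: allocate a real object
          slot = new Entry(hash, data);
          _callback.link(slot);
        }
        assert(_callback.equals(hash, slot));
        return *slot;
      }
     private:
      std::unordered_map<uintptr_t, Entry*> _buckets;
      AscendingIdSketch<T, IdType> _callback;
    };

    int main() {
      TableSketch<std::string, uint64_t> table;
      const uint64_t a = table.lookup_put(17, "klass").id;
      const uint64_t b = table.lookup_put(17, "klass").id;   // same hash, same entry, same id
      return (a == 1 && b == 1) ? 0 : 1;
    }
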
--- a/src/hotspot/share/jfr/utilities/jfrTypes.hpp	Fri Aug 30 20:39:38 2019 +0200
+++ b/src/hotspot/share/jfr/utilities/jfrTypes.hpp	Mon Sep 02 19:42:46 2019 +0200
@@ -52,37 +52,12 @@
   return compare_traceid(*lhs, *rhs);
 }
 
-class JfrTraceFlag {
- private:
-  mutable jshort _flags;
- public:
-  JfrTraceFlag() : _flags(0) {}
-  bool is_set(jshort flag) const {
-    return (_flags & flag) != 0;
-  }
-
-  jshort flags() const {
-    return _flags;
-  }
-
-  void set_flags(jshort flags) const {
-    _flags = flags;
-  }
-
-  jbyte* flags_addr() const {
-    return (jbyte*)&_flags;
-  }
-  jbyte* meta_addr() const {
-    return ((jbyte*)&_flags) + 1;
-  }
-};
-
 enum EventStartTime {
   UNTIMED,
   TIMED
 };
 
-enum JfrCheckpointType : u1 {
+enum JfrCheckpointType {
   GENERIC,
   FLUSH,
   HEADER,