src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
changeset 58132 caa25ab47aca
parent 57644 446dcfc2a925
child 58157 9dca61a7df19
child 58679 9c3209ff7550
child 58863 c16ac7a2eba4
diff -r 3054503bad7d -r caa25ab47aca src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
@@ -22,272 +22,450 @@
  *
  */
 
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
-#include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
-#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
 #include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleWriter.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSample.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/metadata/jfrSerializer.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.inline.hpp"
-
-template <typename SampleProcessor>
-static void do_samples(ObjectSample* sample, const ObjectSample* const end, SampleProcessor& processor) {
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/utilities/jfrHashtable.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/growableArray.hpp"
+
+static bool predicate(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  bool found = false;
+  set->find_sorted<traceid, compare_traceid>(id, found);
+  return found;
+}
+
+static bool mutable_predicate(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  bool found = false;
+  const int location = set->find_sorted<traceid, compare_traceid>(id, found);
+  if (!found) {
+    set->insert_before(location, id);
+  }
+  return found;
+}
+
+static bool add(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  return mutable_predicate(set, id);
+}
+
+const int initial_array_size = 64;
+
+template <typename T>
+static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
+  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
+}
+
+static GrowableArray<traceid>* unloaded_thread_id_set = NULL;
+
+class ThreadIdExclusiveAccess : public StackObj {
+ private:
+  static Semaphore _mutex_semaphore;
+ public:
+  ThreadIdExclusiveAccess() { _mutex_semaphore.wait(); }
+  ~ThreadIdExclusiveAccess() { _mutex_semaphore.signal(); }
+};
+
+Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);
+
+static bool has_thread_exited(traceid tid) {
+  assert(tid != 0, "invariant");
+  return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
+}
+
+static void add_to_unloaded_thread_set(traceid tid) {
+  ThreadIdExclusiveAccess lock;
+  if (unloaded_thread_id_set == NULL) {
+    unloaded_thread_id_set = c_heap_allocate_array<traceid>();
+  }
+  add(unloaded_thread_id_set, tid);
+}
+
+void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
+  assert(jt != NULL, "invariant");
+  if (LeakProfiler::is_running()) {
+    add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id());
+  }
+}
+
+// Track the set of unloaded klasses during a chunk / epoch.
+// Methods in stacktraces belonging to unloaded klasses must not be accessed.
+static GrowableArray<traceid>* unloaded_klass_set = NULL;
+
+static void add_to_unloaded_klass_set(traceid klass_id) {
+  if (unloaded_klass_set == NULL) {
+    unloaded_klass_set = c_heap_allocate_array<traceid>();
+  }
+  unloaded_klass_set->append(klass_id);
+}
+
+static void sort_unloaded_klass_set() {
+  if (unloaded_klass_set != NULL && unloaded_klass_set->length() > 1) {
+    unloaded_klass_set->sort(sort_traceid);
+  }
+}
+
+void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
+  assert(k != NULL, "invariant");
+  add_to_unloaded_klass_set(TRACE_ID(k));
+}
+
+template <typename Processor>
+static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
   assert(sample != NULL, "invariant");
   while (sample != end) {
     processor.sample_do(sample);
     sample = sample->next();
   }
 }
 
-class RootSystemType : public JfrSerializer {
- public:
-  void serialize(JfrCheckpointWriter& writer) {
-    const u4 nof_root_systems = OldObjectRoot::_number_of_systems;
-    writer.write_count(nof_root_systems);
-    for (u4 i = 0; i < nof_root_systems; ++i) {
-      writer.write_key(i);
-      writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i));
-    }
-  }
-};
-
-class RootType : public JfrSerializer {
- public:
-  void serialize(JfrCheckpointWriter& writer) {
-    const u4 nof_root_types = OldObjectRoot::_number_of_types;
-    writer.write_count(nof_root_types);
-    for (u4 i = 0; i < nof_root_types; ++i) {
-      writer.write_key(i);
-      writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i));
-    }
-  }
-};
-
-class CheckpointInstall {
- private:
-  const JfrCheckpointBlobHandle& _cp;
- public:
-  CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {}
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (!sample->is_dead()) {
-      sample->set_klass_checkpoint(_cp);
-    }
-  }
-};
-
-class CheckpointWrite {
- private:
-  JfrCheckpointWriter& _writer;
-  const jlong _last_sweep;
- public:
-  CheckpointWrite(JfrCheckpointWriter& writer, jlong last_sweep) : _writer(writer), _last_sweep(last_sweep) {}
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (sample->is_alive_and_older_than(_last_sweep)) {
-      if (sample->has_thread_checkpoint()) {
-        const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
-        thread_cp->exclusive_write(_writer);
-      }
-      if (sample->has_klass_checkpoint()) {
-        const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
-        klass_cp->exclusive_write(_writer);
-      }
-    }
-  }
-};
-
-class CheckpointStateReset {
- private:
-  const jlong _last_sweep;
- public:
-  CheckpointStateReset(jlong last_sweep) : _last_sweep(last_sweep) {}
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (sample->is_alive_and_older_than(_last_sweep)) {
-      if (sample->has_thread_checkpoint()) {
-        const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint();
-        thread_cp->reset_write_state();
-      }
-      if (sample->has_klass_checkpoint()) {
-        const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint();
-        klass_cp->reset_write_state();
-      }
-    }
-  }
-};
-
-class StackTraceWrite {
- private:
-  JfrStackTraceRepository& _stack_trace_repo;
-  JfrCheckpointWriter& _writer;
-  int _count;
- public:
-  StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer) :
-    _stack_trace_repo(stack_trace_repo), _writer(writer), _count(0) {
-    JfrStacktrace_lock->lock_without_safepoint_check();
-  }
-  ~StackTraceWrite() {
-    assert(JfrStacktrace_lock->owned_by_self(), "invariant");
-    JfrStacktrace_lock->unlock();
-  }
-
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (!sample->is_dead()) {
-      if (sample->has_stack_trace()) {
-        JfrTraceId::use(sample->klass(), true);
-        _stack_trace_repo.write(_writer, sample->stack_trace_id(), sample->stack_trace_hash());
-        ++_count;
-      }
-    }
-  }
-
-  int count() const {
-    return _count;
-  }
-};
-
-class SampleMark {
+template <typename Processor>
+static void iterate_samples(Processor& processor, bool all = false) {
+  ObjectSampler* const sampler = ObjectSampler::sampler();
+  assert(sampler != NULL, "invariant");
+  ObjectSample* const last = sampler->last();
+  assert(last != NULL, "invariant");
+  do_samples(last, all ? NULL : sampler->last_resolved(), processor);
+}
+
+class SampleMarker {
  private:
   ObjectSampleMarker& _marker;
   jlong _last_sweep;
   int _count;
  public:
-  SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker),
-                                                             _last_sweep(last_sweep),
-                                                             _count(0) {}
+  SampleMarker(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), _last_sweep(last_sweep), _count(0) {}
   void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
     if (sample->is_alive_and_older_than(_last_sweep)) {
       _marker.mark(sample->object());
       ++_count;
     }
   }
-
   int count() const {
     return _count;
   }
 };
 
-void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool type_set) {
-  if (!writer.has_data()) {
+int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) {
+  assert(sampler != NULL, "invariant");
+  if (sampler->last() == NULL) {
+    return 0;
+  }
+  SampleMarker sample_marker(marker, emit_all ? max_jlong : sampler->last_sweep().value());
+  iterate_samples(sample_marker, true);
+  return sample_marker.count();
+}
+
+class BlobCache {
+  typedef HashTableHost<JfrBlobHandle, traceid, JfrHashtableEntry, BlobCache> BlobTable;
+  typedef BlobTable::HashEntry BlobEntry;
+ private:
+  BlobTable _table;
+  traceid _lookup_id;
+ public:
+  BlobCache(size_t size) : _table(this, size), _lookup_id(0) {}
+  JfrBlobHandle get(const ObjectSample* sample);
+  void put(const ObjectSample* sample, const JfrBlobHandle& blob);
+  // Hash table callbacks
+  void on_link(const BlobEntry* entry) const;
+  bool on_equals(uintptr_t hash, const BlobEntry* entry) const;
+  void on_unlink(BlobEntry* entry) const;
+};
+
+JfrBlobHandle BlobCache::get(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  _lookup_id = sample->stack_trace_id();
+  assert(_lookup_id != 0, "invariant");
+  BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash());
+  return entry != NULL ? entry->literal() : JfrBlobHandle();
+}
+
+void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) {
+  assert(sample != NULL, "invariant");
+  assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant");
+  _lookup_id = sample->stack_trace_id();
+  assert(_lookup_id != 0, "invariant");
+  _table.put(sample->stack_trace_hash(), blob);
+}
+
+inline void BlobCache::on_link(const BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
+  assert(entry->id() == 0, "invariant");
+  entry->set_id(_lookup_id);
+}
+
+inline bool BlobCache::on_equals(uintptr_t hash, const BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
+  assert(entry->hash() == hash, "invariant");
+  return entry->id() == _lookup_id;
+}
+
+inline void BlobCache::on_unlink(BlobEntry* entry) const {
+  assert(entry != NULL, "invariant");
+}
+
+static GrowableArray<traceid>* id_set = NULL;
+
+static void prepare_for_resolution() {
+  id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
+  sort_unloaded_klass_set();
+}
+
+static bool stack_trace_precondition(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  return sample->has_stack_trace_id() && !sample->is_dead();
+}
+
+class StackTraceBlobInstaller {
+ private:
+  const JfrStackTraceRepository& _stack_trace_repo;
+  BlobCache _cache;
+  const JfrStackTrace* resolve(const ObjectSample* sample);
+  void install(ObjectSample* sample);
+ public:
+  StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo);
+  void sample_do(ObjectSample* sample) {
+    if (stack_trace_precondition(sample)) {
+      install(sample);
+    }
+  }
+};
+
+StackTraceBlobInstaller::StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo) :
+  _stack_trace_repo(stack_trace_repo), _cache(JfrOptionSet::old_object_queue_size()) {
+  prepare_for_resolution();
+}
+
+const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) {
+  return _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id());
+}
+
+#ifdef ASSERT
+static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) {
+  assert(!sample->has_stacktrace(), "invariant");
+  assert(stack_trace != NULL, "invariant");
+  assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant");
+  assert(stack_trace->id() == sample->stack_trace_id(), "invariant");
+}
+#endif
+
+void StackTraceBlobInstaller::install(ObjectSample* sample) {
+  JfrBlobHandle blob = _cache.get(sample);
+  if (blob.valid()) {
+    sample->set_stacktrace(blob);
     return;
   }
-
-  assert(writer.has_data(), "invariant");
-  const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
-  CheckpointInstall install(h_cp);
-
-  // Class unload implies a safepoint.
-  // Not class unload implies the object sampler is locked, because it was claimed exclusively earlier.
-  // Therefore: direct access the object sampler instance is safe.
-  ObjectSampler* const object_sampler = ObjectSampler::sampler();
-  assert(object_sampler != NULL, "invariant");
-
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  const ObjectSample* const last_resolved = object_sampler->last_resolved();
-
-  // install only to new samples since last resolved checkpoint
-  if (last != last_resolved) {
-    do_samples(last, last_resolved, install);
-    if (class_unload) {
-      return;
+  const JfrStackTrace* const stack_trace = resolve(sample);
+  DEBUG_ONLY(validate_stack_trace(sample, stack_trace));
+  JfrCheckpointWriter writer(false, true, Thread::current());
+  writer.write_type(TYPE_STACKTRACE);
+  writer.write_count(1);
+  ObjectSampleCheckpoint::write_stacktrace(stack_trace, writer);
+  blob = writer.move();
+  _cache.put(sample, blob);
+  sample->set_stacktrace(blob);
+}
+
+static void install_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
+  assert(sampler != NULL, "invariant");
+  const ObjectSample* const last = sampler->last();
+  if (last != sampler->last_resolved()) {
+    StackTraceBlobInstaller installer(stack_trace_repo);
+    iterate_samples(installer);
+  }
+}
+
+// caller needs ResourceMark
+void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
+  assert(sampler != NULL, "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  install_stack_traces(sampler, stack_trace_repo);
+}
+
+static traceid get_klass_id(traceid method_id) {
+  assert(method_id != 0, "invariant");
+  return method_id >> TRACE_ID_SHIFT;
+}
+
+static bool is_klass_unloaded(traceid method_id) {
+  return unloaded_klass_set != NULL && predicate(unloaded_klass_set, get_klass_id(method_id));
+}
+
+static bool is_processed(traceid id) {
+  assert(id != 0, "invariant");
+  assert(id_set != NULL, "invariant");
+  return mutable_predicate(id_set, id);
+}
+
+void ObjectSampleCheckpoint::add_to_leakp_set(const Method* method, traceid method_id) {
+  if (is_processed(method_id) || is_klass_unloaded(method_id)) {
+    return;
+  }
+  JfrTraceId::set_leakp(method);
+}
+
+void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
+  assert(trace != NULL, "invariant");
+  // JfrStackTrace
+  writer.write(trace->id());
+  writer.write((u1)!trace->_reached_root);
+  writer.write(trace->_nr_of_frames);
+  // JfrStackFrames
+  for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
+    const JfrStackFrame& frame = trace->_frames[i];
+    frame.write(writer);
+    add_to_leakp_set(frame._method, frame._methodid);
+  }
+}
+
+static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) {
+  if (reset) {
+    blob->reset_write_state();
+    return;
+  }
+  blob->exclusive_write(writer);
+}
+
+static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  if (sample->has_type_set()) {
+    write_blob(sample->type_set(), writer, reset);
+  }
+}
+
+static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  assert(sample->has_thread(), "invariant");
+  if (has_thread_exited(sample->thread_id())) {
+    write_blob(sample->thread(), writer, reset);
+  }
+}
+
+static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  if (sample->has_stacktrace()) {
+    write_blob(sample->stacktrace(), writer, reset);
+  }
+}
+
+static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+  assert(sample != NULL, "invariant");
+  write_stacktrace_blob(sample, writer, reset);
+  write_thread_blob(sample, writer, reset);
+  write_type_set_blob(sample, writer, reset);
+}
+
+class BlobWriter {
+ private:
+  const ObjectSampler* _sampler;
+  JfrCheckpointWriter& _writer;
+  const jlong _last_sweep;
+  bool _reset;
+ public:
+  BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
+    _sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false)  {}
+  void sample_do(ObjectSample* sample) {
+    if (sample->is_alive_and_older_than(_last_sweep)) {
+      write_blobs(sample, _writer, _reset);
     }
-    if (type_set) {
-      object_sampler->set_last_resolved(last);
-    }
-  }
-}
+  }
+  void set_reset() {
+    _reset = true;
+  }
+};
 
-void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
+static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
+  // sample set is predicated on time of last sweep
+  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
+  JfrCheckpointWriter writer(false, false, thread);
+  BlobWriter cbw(sampler, writer, last_sweep);
+  iterate_samples(cbw, true);
+  // reset blob write states
+  cbw.set_reset();
+  iterate_samples(cbw, true);
+}
+
+void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
   assert(sampler != NULL, "invariant");
   assert(edge_store != NULL, "invariant");
   assert(thread != NULL, "invariant");
-
-  static bool types_registered = false;
-  if (!types_registered) {
-    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
-    JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
-    types_registered = true;
-  }
-
-  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
-  ObjectSample* const last = const_cast<ObjectSample*>(sampler->last());
-  {
-    JfrCheckpointWriter writer(false, false, thread);
-    CheckpointWrite checkpoint_write(writer, last_sweep);
-    do_samples(last, NULL, checkpoint_write);
-  }
-
-  CheckpointStateReset state_reset(last_sweep);
-  do_samples(last, NULL, state_reset);
-
+  write_sample_blobs(sampler, emit_all, thread);
+  // write reference chains
   if (!edge_store->is_empty()) {
-    // java object and chain representations
     JfrCheckpointWriter writer(false, true, thread);
     ObjectSampleWriter osw(writer, edge_store);
     edge_store->iterate(osw);
   }
 }
 
-int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) {
-  assert(object_sampler != NULL, "invariant");
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  if (last == NULL) {
-    return 0;
-  }
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  SampleMark mark(marker, last_sweep);
-  do_samples(last, NULL, mark);
-  return mark.count();
-}
-
-WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) :
-  _sampler(sampler), _stack_trace_repo(repo) {}
-
-bool WriteObjectSampleStacktrace::process() {
+static void clear_unloaded_klass_set() {
+  if (unloaded_klass_set != NULL && unloaded_klass_set->is_nonempty()) {
+    unloaded_klass_set->clear();
+  }
+}
+
+// A linked list of saved type set blobs for the epoch.
+// The link consists of a reference counted handle.
+static JfrBlobHandle saved_type_set_blobs;
+
+static void release_state_for_previous_epoch() {
+  // decrements the reference count and the list is reinitialized
+  saved_type_set_blobs = JfrBlobHandle();
+  clear_unloaded_klass_set();
+}
+
+class BlobInstaller {
+ public:
+  ~BlobInstaller() {
+    release_state_for_previous_epoch();
+  }
+  void sample_do(ObjectSample* sample) {
+    if (!sample->is_dead()) {
+      sample->set_type_set(saved_type_set_blobs);
+    }
+  }
+};
+
+static void install_type_set_blobs() {
+  BlobInstaller installer;
+  iterate_samples(installer);
+}
+
+static void save_type_set_blob(JfrCheckpointWriter& writer, bool copy = false) {
+  assert(writer.has_data(), "invariant");
+  const JfrBlobHandle blob = copy ? writer.copy() : writer.move();
+  if (saved_type_set_blobs.valid()) {
+    saved_type_set_blobs->set_next(blob);
+  } else {
+    saved_type_set_blobs = blob;
+  }
+}
+
+void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
   assert(LeakProfiler::is_running(), "invariant");
-  assert(_sampler != NULL, "invariant");
-
-  ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last());
-  const ObjectSample* const last_resolved = _sampler->last_resolved();
-  if (last == last_resolved) {
-    return true;
-  }
-
-  JfrCheckpointWriter writer(false, true, Thread::current());
-  const JfrCheckpointContext ctx = writer.context();
-
-  writer.write_type(TYPE_STACKTRACE);
-  const jlong count_offset = writer.reserve(sizeof(u4));
-
-  int count = 0;
-  {
-    StackTraceWrite stack_trace_write(_stack_trace_repo, writer); // JfrStacktrace_lock
-    do_samples(last, last_resolved, stack_trace_write);
-    count = stack_trace_write.count();
-  }
-  if (count == 0) {
-    writer.set_context(ctx);
-    return true;
-  }
-  assert(count > 0, "invariant");
-  writer.write_count((u4)count, count_offset);
-  JfrStackTraceRepository::write_metadata(writer);
-
-  // install the stacktrace checkpoint information to the candidates
-  ObjectSampleCheckpoint::install(writer, false, false);
-  return true;
-}
+  const ObjectSample* last = ObjectSampler::sampler()->last();
+  if (writer.has_data() && last != NULL) {
+    save_type_set_blob(writer);
+    install_type_set_blobs();
+    ObjectSampler::sampler()->set_last_resolved(last);
+  }
+}
+
+void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
+    save_type_set_blob(writer, true);
+  }
+}
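
A note on the sorted-set helpers introduced by this changeset: predicate(), mutable_predicate() and add() keep a GrowableArray<traceid> sorted so that membership tests (is_processed(), has_thread_exited(), is_klass_unloaded()) and insert-if-absent (add_to_unloaded_thread_set()) are binary searches. The sketch below shows the same idiom in standalone C++ using std::vector and std::lower_bound rather than HotSpot's GrowableArray; it is illustrative only and not part of the changeset.

// Standalone illustration of the sorted-set idiom used by
// predicate()/mutable_predicate() above (std::vector stands in for
// GrowableArray; names and types here are hypothetical).
#include <algorithm>
#include <cstdint>
#include <vector>

typedef uint64_t traceid;

// membership test against a sorted vector (cf. predicate())
static bool contains(const std::vector<traceid>& set, traceid id) {
  return std::binary_search(set.begin(), set.end(), id);
}

// insert-if-absent, keeping the vector sorted; returns the previous
// membership state (cf. mutable_predicate()/add())
static bool test_and_add(std::vector<traceid>& set, traceid id) {
  std::vector<traceid>::iterator pos = std::lower_bound(set.begin(), set.end(), id);
  const bool found = (pos != set.end() && *pos == id);
  if (!found) {
    set.insert(pos, id);
  }
  return found;
}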