src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
changeset 58863 c16ac7a2eba4
parent 58836 31ec3e55fa3d
child 59261 4cf1246fbb9c
comparison of 58861:2c3cc4b01880 (left line number) with 58863:c16ac7a2eba4 (right line number); lines present in both revisions carry both numbers, lines only in 58861 are marked "-", lines only in 58863 are marked "+"
 35  35   #include "jfr/recorder/repository/jfrChunkWriter.hpp"
 36  36   #include "jfr/recorder/service/jfrOptionSet.hpp"
 37  37   #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
 38  38   #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
 39  39   #include "jfr/utilities/jfrBigEndian.hpp"
     40 + #include "jfr/utilities/jfrIterator.hpp"
     41 + #include "jfr/utilities/jfrThreadIterator.hpp"
 40  42   #include "jfr/utilities/jfrTypes.hpp"
     43 + #include "jfr/writers/jfrJavaEventWriter.hpp"
 41  44   #include "logging/log.hpp"
 42  45   #include "memory/resourceArea.hpp"
 43  46   #include "runtime/handles.inline.hpp"
 44      - #include "runtime/mutexLocker.hpp"
     47 + #include "runtime/mutex.hpp"
 45  48   #include "runtime/orderAccess.hpp"
 46  49   #include "runtime/os.inline.hpp"
 47  50   #include "runtime/safepoint.hpp"
 48  51
 49  52   typedef JfrCheckpointManager::Buffer* BufferPtr;
166 169     DEBUG_ONLY(assert_free_lease(buffer);)
167 170     return buffer;
168 171   }
169 172
170 173   bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
171     -   return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
    174 +   return _service_thread != thread && _checkpoint_epoch_state != JfrTraceIdEpoch::epoch();
172 175   }
173 176
174 177   static const size_t lease_retry = 10;
175 178
176 179   BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) {
179 182       return lease_free(size, manager._epoch_transition_mspace, lease_retry, thread);
180 183     }
181 184     return lease_free(size, manager._free_list_mspace, lease_retry, thread);
182 185   }
183 186
    187 + JfrCheckpointMspace* JfrCheckpointManager::lookup(BufferPtr old) const {
    188 +   assert(old != NULL, "invariant");
    189 +   return _free_list_mspace->in_free_list(old) ? _free_list_mspace : _epoch_transition_mspace;
    190 + }
    191 +
    192 + BufferPtr JfrCheckpointManager::lease_buffer(BufferPtr old, Thread* thread, size_t size /* 0 */) {
    193 +   assert(old != NULL, "invariant");
    194 +   JfrCheckpointMspace* mspace = instance().lookup(old);
    195 +   assert(mspace != NULL, "invariant");
    196 +   return lease_free(size, mspace, lease_retry, thread);
    197 + }
    198 +
184 199   /*
185     - * If the buffer was a "lease" from the free list, release back.
    200 +  * If the buffer was a lease, release back.
186     - *
    201 +  *
187     - * The buffer is effectively invalidated for the thread post-return,
    202 +  * The buffer is effectively invalidated for the thread post-return,
188     - * and the caller should take means to ensure that it is not referenced.
    203 +  * and the caller should take means to ensure that it is not referenced.
189     - */
    204 +  */
190 205   static void release(BufferPtr const buffer, Thread* thread) {
191 206     DEBUG_ONLY(assert_release(buffer);)
192 207     buffer->clear_lease();
193 208     buffer->release();
194 209   }
200 215       // indicates a lease is being returned
201 216       release(old, thread);
202 217       return NULL;
203 218     }
204 219     // migration of in-flight information
205     -   BufferPtr const new_buffer = lease_buffer(thread, used + requested);
    220 +   BufferPtr const new_buffer = lease_buffer(old, thread, used + requested);
206 221     if (new_buffer != NULL) {
207 222       migrate_outstanding_writes(old, new_buffer, used, requested);
208 223     }
209 224     release(old, thread);
210 225     return new_buffer; // might be NULL
211 226   }
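Read together, the hunks above describe the lease protocol for checkpoint buffers. The following comment-form summary is an illustrative reading only; the guard that selects the mspace and the signature of the renewal function fall in lines elided from this comparison:

    // Lease lifecycle, pieced together from the code above:
    //  1. lease_buffer(thread, size) takes a lease via lease_free(), either from
    //     the epoch transition mspace or from the free list mspace (the selecting
    //     condition is elided here, but use_epoch_transition_mspace() is the
    //     natural guard).
    //  2. The leasing thread writes checkpoint data into the buffer.
    //  3. On renewal, lease_buffer(old, thread, used + requested) takes a larger
    //     lease from the same mspace (instance().lookup(old)) and
    //     migrate_outstanding_writes() carries the in-flight bytes across.
    //  4. release() clears the lease and releases the buffer; per the comment
    //     above, the caller must not reference the buffer afterwards.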
212 227
213 228   // offsets into the JfrCheckpointEntry
214 229   static const juint starttime_offset = sizeof(jlong);
215 230   static const juint duration_offset = starttime_offset + sizeof(jlong);
216     - static const juint flushpoint_offset = duration_offset + sizeof(jlong);
    231 + static const juint checkpoint_type_offset = duration_offset + sizeof(jlong);
217     - static const juint types_offset = flushpoint_offset + sizeof(juint);
    232 + static const juint types_offset = checkpoint_type_offset + sizeof(juint);
218 233   static const juint payload_offset = types_offset + sizeof(juint);
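Spelled out, the constants above rename the third header slot from "flushpoint" to "checkpoint type". A sketch of the implied JfrCheckpointEntry byte layout follows; the leading 8-byte field is not named in this hunk, and calling it the total entry size is an inference from total_size(data) further down:

    // Implied JfrCheckpointEntry layout (byte offsets), per the constants above:
    //   [ 0,  8)  leading jlong read by total_size(data)  (inferred: total entry size)
    //   [ 8, 16)  start time       starttime_offset
    //   [16, 24)  duration         duration_offset
    //   [24, 28)  checkpoint type  checkpoint_type_offset (juint-wide slot, read as a u1 below)
    //   [28, 32)  number of types  types_offset
    //   [32, ..)  payload          payload_offset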
219 234
220 235   template <typename Return>
221 236   static Return read_data(const u1* data) {
222 237     return JfrBigEndian::read<Return>(data);
232 247
233 248   static jlong duration(const u1* data) {
234 249     return read_data<jlong>(data + duration_offset);
235 250   }
236 251
237     - static bool is_flushpoint(const u1* data) {
    252 + static u1 checkpoint_type(const u1* data) {
238     -   return read_data<juint>(data + flushpoint_offset) == (juint)1;
    253 +   return read_data<u1>(data + checkpoint_type_offset);
239 254   }
240 255
241 256   static juint number_of_types(const u1* data) {
242 257     return read_data<juint>(data + types_offset);
243 258   }
244 259
245     - static void write_checkpoint_header(JfrChunkWriter& cw, int64_t offset_prev_cp_event, const u1* data) {
    260 + static void write_checkpoint_header(JfrChunkWriter& cw, int64_t delta_to_last_checkpoint, const u1* data) {
246 261     cw.reserve(sizeof(u4));
247 262     cw.write<u8>(EVENT_CHECKPOINT);
248 263     cw.write(starttime(data));
249 264     cw.write(duration(data));
250     -   cw.write(offset_prev_cp_event);
    265 +   cw.write(delta_to_last_checkpoint);
251     -   cw.write(is_flushpoint(data));
    266 +   cw.write(checkpoint_type(data));
252 267     cw.write(number_of_types(data));
253 268   }
254 269
255 270   static void write_checkpoint_content(JfrChunkWriter& cw, const u1* data, size_t size) {
256 271     assert(data != NULL, "invariant");
259 274
260 275   static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) {
261 276     assert(data != NULL, "invariant");
262 277     const int64_t event_begin = cw.current_offset();
263 278     const int64_t last_checkpoint_event = cw.last_checkpoint_offset();
264     -   const int64_t delta = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin;
    279 +   const int64_t delta_to_last_checkpoint = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin;
265 280     const int64_t checkpoint_size = total_size(data);
266     -   write_checkpoint_header(cw, delta, data);
    281 +   write_checkpoint_header(cw, delta_to_last_checkpoint, data);
267 282     write_checkpoint_content(cw, data, checkpoint_size);
268 283     const int64_t event_size = cw.current_offset() - event_begin;
269 284     cw.write_padded_at_offset<u4>(event_size, event_begin);
270 285     cw.set_last_checkpoint_offset(event_begin);
271 286     return (size_t)checkpoint_size;
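A worked example of the renamed delta, with illustrative offsets only:

    // If the previous checkpoint event began at chunk offset 1000 and this event
    // begins at offset 1400, then
    //   delta_to_last_checkpoint = 1000 - 1400 = -400
    // i.e. a backward distance from this event to the previous checkpoint.
    // A value of 0 means no earlier checkpoint exists in the current chunk.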
303 318   };
304 319
305 320   typedef CheckpointWriteOp<JfrCheckpointMspace::Type> WriteOperation;
306 321   typedef ReleaseOp<JfrCheckpointMspace> CheckpointReleaseOperation;
307 322
308     - template <template <typename> class WriterHost, template <typename, typename> class CompositeOperation>
    323 + template <template <typename> class WriterHost, template <typename, typename, typename> class CompositeOperation>
309 324   static size_t write_mspace(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwriter) {
310 325     assert(mspace != NULL, "invariant");
311 326     WriteOperation wo(chunkwriter);
312 327     WriterHost<WriteOperation> wh(wo);
313 328     CheckpointReleaseOperation cro(mspace, Thread::current(), false);
314     -   CompositeOperation<WriterHost<WriteOperation>, CheckpointReleaseOperation> co(&wh, &cro);
    329 +   CompositeOperation<WriterHost<WriteOperation>, CheckpointReleaseOperation, CompositeOperationAnd> co(&wh, &cro);
315 330     assert(mspace->is_full_empty(), "invariant");
316 331     process_free_list(co, mspace);
317 332     return wo.processed();
318 333   }
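The CompositeOperation template now takes a third parameter that names the combining policy. Below is a minimal, self-contained sketch of an and-composed pair of operations; it is not the HotSpot template, and the short-circuit semantics attributed to CompositeOperationAnd is an assumption based on its name:

    // Sketch only: apply a write operation, then a follow-up operation, and
    // report success only if both succeed (mirrors && evaluation).
    template <typename Operation, typename NextOperation>
    class ComposeAnd {
      Operation* _op;
      NextOperation* _next;
     public:
      typedef typename Operation::Type Type;
      ComposeAnd(Operation* op, NextOperation* next) : _op(op), _next(next) {}
      bool process(Type* t) {
        // the second operation (e.g. releasing the buffer back to its mspace)
        // only runs when the first (writing it to the chunk) succeeded
        return _op->process(t) && _next->process(t);
      }
    };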
319 334
329 344     return processed;
330 345   }
331 346
332 347   size_t JfrCheckpointManager::write_epoch_transition_mspace() {
333 348     return write_mspace<ExclusiveOp, CompositeOperation>(_epoch_transition_mspace, _chunkwriter);
    349 + }
    350 +
    351 + typedef MutexedWriteOp<WriteOperation> FlushOperation;
    352 +
    353 + size_t JfrCheckpointManager::flush() {
    354 +   WriteOperation wo(_chunkwriter);
    355 +   FlushOperation fo(wo);
    356 +   assert(_free_list_mspace->is_full_empty(), "invariant");
    357 +   process_free_list(fo, _free_list_mspace);
    358 +   return wo.processed();
334 359   }
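Structurally, the new flush() differs from write_mspace() above only in what it composes: there is no CheckpointReleaseOperation in the flush path. A comment-form summary of that difference (the consequence drawn in the last lines is an assumption based on the absence of the release step, not something stated in this file):

    // write_mspace(): WriterHost<WriteOperation> composed (And) with
    //                 CheckpointReleaseOperation -> buffers are written to the
    //                 chunk and then released back to their mspace.
    // flush():        MutexedWriteOp<WriteOperation> only -> buffers are written
    //                 to the chunk under mutual exclusion and are not released
    //                 here, presumably so in-flight checkpoint data can be
    //                 emitted at a flush without retiring the buffers.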
335 360
336 361   typedef DiscardOp<DefaultDiscarder<JfrBuffer> > DiscardOperation;
337 362   size_t JfrCheckpointManager::clear() {
338 363     JfrTypeSet::clear();
339 364     DiscardOperation discarder(mutexed); // mutexed discard mode
340 365     process_free_list(discarder, _free_list_mspace);
341 366     process_free_list(discarder, _epoch_transition_mspace);
342 367     synchronize_epoch();
343     -   return discarder.processed();
    368 +   return discarder.elements();
344 369   }
345 370
346     - size_t JfrCheckpointManager::write_types() {
    371 + // Optimization for write_static_type_set() and write_threads() is to write
347     -   JfrCheckpointWriter writer(false, true, Thread::current());
    372 + // directly into the epoch transition mspace because we will immediately
348     -   JfrTypeManager::write_types(writer);
    373 + // serialize and reset this mspace post-write.
    374 + static JfrBuffer* get_epoch_transition_buffer(JfrCheckpointMspace* mspace, Thread* t) {
    375 +   assert(mspace != NULL, "invariant");
    376 +   JfrBuffer* const buffer = mspace->free_head();
    377 +   assert(buffer != NULL, "invariant");
    378 +   buffer->acquire(t);
    379 +   buffer->set_lease();
    380 +   DEBUG_ONLY(assert_free_lease(buffer);)
    381 +   return buffer;
    382 + }
    383 +
    384 + bool JfrCheckpointManager::is_static_type_set_required() {
    385 +   return JfrTypeManager::has_new_static_type();
    386 + }
    387 +
    388 + size_t JfrCheckpointManager::write_static_type_set() {
    389 +   Thread* const t = Thread::current();
    390 +   ResourceMark rm(t);
    391 +   HandleMark hm(t);
    392 +   JfrCheckpointWriter writer(t, get_epoch_transition_buffer(_epoch_transition_mspace, t), STATICS);
    393 +   JfrTypeManager::write_static_types(writer);
349 394     return writer.used_size();
350 395   }
351 396
352     - size_t JfrCheckpointManager::write_safepoint_types() {
    397 + size_t JfrCheckpointManager::write_threads() {
353     -   // this is also a "flushpoint"
    398 +   Thread* const t = Thread::current();
354     -   JfrCheckpointWriter writer(true, true, Thread::current());
    399 +   ResourceMark rm(t);
355     -   JfrTypeManager::write_safepoint_types(writer);
    400 +   HandleMark hm(t);
    401 +   JfrCheckpointWriter writer(t, get_epoch_transition_buffer(_epoch_transition_mspace, t), THREADS);
    402 +   JfrTypeManager::write_threads(writer);
356 403     return writer.used_size();
357 404   }
358 405
359     - void JfrCheckpointManager::write_type_set() {
    406 + size_t JfrCheckpointManager::write_static_type_set_and_threads() {
360     -   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
    407 +   write_static_type_set();
361     -   // can safepoint here
    408 +   write_threads();
362     -   MutexLocker cld_lock(ClassLoaderDataGraph_lock);
    409 +   return write_epoch_transition_mspace();
363     -   MutexLocker module_lock(Module_lock);
364     -   if (!LeakProfiler::is_running()) {
365     -     JfrCheckpointWriter writer(true, true, Thread::current());
366     -     JfrTypeSet::serialize(&writer, NULL, false);
367     -   } else {
368     -     Thread* const t = Thread::current();
369     -     JfrCheckpointWriter leakp_writer(false, true, t);
370     -     JfrCheckpointWriter writer(false, true, t);
371     -     JfrTypeSet::serialize(&writer, &leakp_writer, false);
372     -     ObjectSampleCheckpoint::on_type_set(leakp_writer);
373     -   }
374     - }
375     -
376     - void JfrCheckpointManager::write_type_set_for_unloaded_classes() {
377     -   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
378     -   JfrCheckpointWriter writer(false, true, Thread::current());
379     -   const JfrCheckpointContext ctx = writer.context();
380     -   JfrTypeSet::serialize(&writer, NULL, true);
381     -   if (LeakProfiler::is_running()) {
382     -     ObjectSampleCheckpoint::on_type_set_unload(writer);
383     -   }
384     -   if (!JfrRecorder::is_recording()) {
385     -     // discard by rewind
386     -     writer.set_context(ctx);
387     -   }
388     - }
389     -
390     - void JfrCheckpointManager::create_thread_blob(JavaThread* jt) {
391     -   JfrTypeManager::create_thread_blob(jt);
392     - }
393     -
394     - void JfrCheckpointManager::write_thread_checkpoint(JavaThread* jt) {
395     -   JfrTypeManager::write_thread_checkpoint(jt);
396 410   }
397 411
398 412   void JfrCheckpointManager::shift_epoch() {
399 413     debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
400 414     JfrTraceIdEpoch::shift_epoch();
401 415     assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
402 416   }
    417 +
    418 + void JfrCheckpointManager::on_rotation() {
    419 +   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
    420 +   JfrTypeManager::on_rotation();
    421 +   notify_threads();
    422 + }
    423 +
    424 + void JfrCheckpointManager::write_type_set() {
    425 +   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
    426 +   if (LeakProfiler::is_running()) {
    427 +     Thread* const t = Thread::current();
    428 +     // can safepoint here
    429 +     MutexLocker cld_lock(ClassLoaderDataGraph_lock);
    430 +     MutexLocker module_lock(Module_lock);
    431 +     JfrCheckpointWriter leakp_writer(t);
    432 +     JfrCheckpointWriter writer(t);
    433 +     JfrTypeSet::serialize(&writer, &leakp_writer, false, false);
    434 +     ObjectSampleCheckpoint::on_type_set(leakp_writer);
    435 +   } else {
    436 +     // can safepoint here
    437 +     MutexLocker cld_lock(ClassLoaderDataGraph_lock);
    438 +     MutexLocker module_lock(Module_lock);
    439 +     JfrCheckpointWriter writer(Thread::current());
    440 +     JfrTypeSet::serialize(&writer, NULL, false, false);
    441 +   }
    442 +   write();
    443 + }
    444 +
    445 + void JfrCheckpointManager::write_type_set_for_unloaded_classes() {
    446 +   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
    447 +   JfrCheckpointWriter writer(Thread::current());
    448 +   const JfrCheckpointContext ctx = writer.context();
    449 +   JfrTypeSet::serialize(&writer, NULL, true, false);
    450 +   if (LeakProfiler::is_running()) {
    451 +     ObjectSampleCheckpoint::on_type_set_unload(writer);
    452 +   }
    453 +   if (!JfrRecorder::is_recording()) {
    454 +     // discard by rewind
    455 +     writer.set_context(ctx);
    456 +   }
    457 + }
    458 +
    459 + bool JfrCheckpointManager::is_type_set_required() {
    460 +   return JfrTraceIdEpoch::has_changed_tag_state();
    461 + }
    462 +
    463 + size_t JfrCheckpointManager::flush_type_set() {
    464 +   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
    465 +   size_t elements = 0;
    466 +   {
    467 +     JfrCheckpointWriter writer(Thread::current());
    468 +     // can safepoint here
    469 +     MutexLocker cld_lock(ClassLoaderDataGraph_lock);
    470 +     MutexLocker module_lock(Module_lock);
    471 +     elements = JfrTypeSet::serialize(&writer, NULL, false, true);
    472 +   }
    473 +   flush();
    474 +   return elements;
    475 + }
    476 +
    477 + void JfrCheckpointManager::flush_static_type_set() {
    478 +   flush();
    479 + }
    480 +
    481 + void JfrCheckpointManager::create_thread_blob(Thread* t) {
    482 +   JfrTypeManager::create_thread_blob(t);
    483 + }
    484 +
    485 + void JfrCheckpointManager::write_thread_checkpoint(Thread* t) {
    486 +   JfrTypeManager::write_thread_checkpoint(t);
    487 + }
    488 +
    489 + class JfrNotifyClosure : public ThreadClosure {
    490 +  public:
    491 +   void do_thread(Thread* t) {
    492 +     assert(t != NULL, "invariant");
    493 +     assert(t->is_Java_thread(), "invariant");
    494 +     assert_locked_or_safepoint(Threads_lock);
    495 +     JfrJavaEventWriter::notify((JavaThread*)t);
    496 +   }
    497 + };
    498 +
    499 + void JfrCheckpointManager::notify_threads() {
    500 +   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
    501 +   JfrNotifyClosure tc;
    502 +   JfrJavaThreadIterator iter;
    503 +   while (iter.has_next()) {
    504 +     tc.do_thread(iter.next());
    505 +   }
    506 + }