src/hotspot/share/jfr/leakprofiler/emitEventOperation.cpp
branch metal-prototype-branch
changeset 57457 95604ec1205d
parent 57441 ee34e24af607
parent 55607 5919b273def6
child 57458 3a7c29ba6b1c
/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/emitEventOperation.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"

/* The EdgeQueue is backed by directly managed virtual memory.
 * We will attempt to dimension an initial reservation
 * in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap OR at least 32 MB
 * Commit ratio: 1 : 10 (subject to allocation granularities)
 */
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
  return memory_reservation_bytes;
}

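// A commit block is one tenth of the reservation; with the 32 MB minimum
// reservation above, a block is at least 3.2 MB, which the assert below
// conservatively rounds down to 3 MB.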
static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
  return memory_commit_block_size_bytes;
}

static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
  log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
  log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
  log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
  if (edge_queue.reserved_size() > 0) {
    log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
  }
}

void EmitEventOperation::doit() {
  assert(LeakProfiler::is_running(), "invariant");
  _object_sampler = LeakProfiler::object_sampler();
  assert(_object_sampler != NULL, "invariant");

  _vm_thread = VMThread::vm_thread();
  assert(_vm_thread == Thread::current(), "invariant");
  _vm_thread_local = _vm_thread->jfr_thread_local();
  assert(_vm_thread_local != NULL, "invariant");
  assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

  // The VM_Operation::evaluate() which invoked doit()
  // contains a top level ResourceMark

  // save the original markWord for the potential leak objects
  // to be restored on function exit
  ObjectSampleMarker marker;
  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
    return;
  }

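  // The edge store records the reference chains found for sampled objects;
  // the ids it hands out are what the emitted events reference.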
  EdgeStore edge_store;

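  // Bound the time spent chasing reference chains by _cutoff_ticks;
  // the second argument is the granularity at which the timer rechecks the clock.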
  GranularTimer::start(_cutoff_ticks, 1000000);
  if (_cutoff_ticks <= 0) {
    // no chains
    write_events(&edge_store);
    return;
  }

  assert(_cutoff_ticks > 0, "invariant");

  // The bitset used for marking is dimensioned as a function of the heap size
  const MemRegion heap_region = Universe::heap()->reserved_region();
  BitSet mark_bits(heap_region);

  // The edge queue is dimensioned as a fraction of the heap size
  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));

  // The initialize() routines will attempt to reserve and allocate backing storage memory.
  // Failure to accommodate will render root chain processing impossible.
  // As a fallback on failure, just write out the existing samples, flat, without chains.
  if (!(mark_bits.initialize() && edge_queue.initialize())) {
    log_warning(jfr)("Unable to allocate memory for root chain processing");
    write_events(&edge_store);
    return;
  }

  // necessary condition for attempting a root set iteration
  Universe::heap()->ensure_parsability(false);

  RootSetClosure::add_to_queue(&edge_queue);
  if (edge_queue.is_full()) {
    // Pathological case where roots don't fit in queue
    // Do a depth-first search, but mark roots first
    // to avoid walking sideways over roots
    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
  } else {
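    // Common case: breadth-first search from the queued root set,
    // which records shortest reference chains in the edge store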
    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
    bfs.process();
  }
  GranularTimer::stop();
  write_events(&edge_store);
  log_edge_queue_summary(edge_queue);
}

int EmitEventOperation::write_events(EdgeStore* edge_store) {
  assert(_object_sampler != NULL, "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_vm_thread != NULL, "invariant");
  assert(_vm_thread_local != NULL, "invariant");
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");

  // save thread id in preparation for thread local trace data manipulations
  const traceid vmthread_id = _vm_thread_local->thread_id();
  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
  int count = 0;

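  // emit an event for every sample that is still alive and was
  // allocated before the last sweep (or for all live samples if _emit_all)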
  const ObjectSample* current = _object_sampler->first();
  while (current != NULL) {
    ObjectSample* prev = current->prev();
    if (current->is_alive_and_older_than(last_sweep)) {
      write_event(current, edge_store);
      ++count;
    }
    current = prev;
  }

  // restore thread local stack trace and thread id
  _vm_thread_local->set_thread_id(vmthread_id);
  _vm_thread_local->clear_cached_stack_trace();
  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

  if (count > 0) {
    // serialize associated checkpoints
    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
  }
  return count;
}

static int array_size(const oop object) {
  assert(object != NULL, "invariant");
  if (object->is_array()) {
    return arrayOop(object)->length();
  }
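  // min_jint is the sentinel for "not an array"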
  return min_jint;
}

void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
  assert(sample != NULL, "invariant");
  assert(!sample->is_dead(), "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_vm_thread_local != NULL, "invariant");
  const oop* object_addr = sample->object_addr();
  assert(*object_addr != NULL, "invariant");

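  // A non-NULL mark word here is the pointer to the Edge installed when this
  // object's reference chain was found and stored; NULL means the object was
  // never reached within the cutoff.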
  const Edge* edge = (const Edge*)(*object_addr)->mark();
  traceid gc_root_id = 0;
  if (edge == NULL) {
    // In order to dump out a representation of the event
    // even though it was not reachable / too long to reach,
    // we need to register a top level edge for this object
    Edge e(NULL, object_addr);
    edge_store->add_chain(&e, 1);
    edge = (const Edge*)(*object_addr)->mark();
  } else {
    gc_root_id = edge_store->get_root_id(edge);
  }

  assert(edge != NULL, "invariant");
  assert(edge->pointee() == *object_addr, "invariant");
  const traceid object_id = edge_store->get_id(edge);
  assert(object_id != 0, "invariant");

  EventOldObjectSample e(UNTIMED);
  e.set_starttime(GranularTimer::start_time());
  e.set_endtime(GranularTimer::end_time());
  e.set_allocationTime(sample->allocation_time());
  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
  e.set_object(object_id);
  e.set_arrayElements(array_size(*object_addr));
  e.set_root(gc_root_id);

  // Temporarily assign both the stack trace id and thread id
  // to the thread local data structure of the VMThread (for the duration
  // of the commit() call). This trick provides a means to override
  // the event generation mechanism by injecting externally provided ids.
  // Here, in particular, this allows us to emit an old object event
  // supplying information from where the actual sampling occurred.
  _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
  assert(sample->has_thread(), "invariant");
  _vm_thread_local->set_thread_id(sample->thread_id());
  e.commit();
}