# HG changeset patch # User mgronlun # Date 1566649827 -7200 # Node ID 00860d9caf4d5854ea9a160c4b2ca3befca1b061 # Parent 84ef29ccac560cf5ff2eff219f9f167ce6fd1a98 New metadata system for oldobjects built on top of simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunkheader contents. diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/dcmd/jfrDcmds.cpp --- a/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -444,7 +444,13 @@ jobjectArray settings = NULL; if (_settings.is_set()) { - const int length = _settings.value()->array()->length(); + int length = _settings.value()->array()->length(); + if (length == 1) { + const char* c_str = _settings.value()->array()->at(0); + if (strcmp(c_str, "none") == 0) { + length = 0; + } + } settings = JfrJavaSupport::new_string_array(length, CHECK); assert(settings != NULL, "invariant"); for (int i = 0; i < length; ++i) { diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp --- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -1521,7 +1521,7 @@ assert(new_method != NULL, "invariant"); assert(new_method->name() == old_method->name(), "invariant"); assert(new_method->signature() == old_method->signature(), "invariant"); - *new_method->trace_flags_addr() = old_method->trace_flags(); + new_method->set_trace_flags(old_method->trace_flags()); assert(new_method->trace_flags() == old_method->trace_flags(), "invariant"); } } diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/jfr.cpp --- a/src/hotspot/share/jfr/jfr.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/jfr.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -96,7 +96,9 @@ } void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { - LeakProfiler::oops_do(is_alive, f); + if (LeakProfiler::is_running()) { + LeakProfiler::oops_do(is_alive, f); + } } bool Jfr::on_flight_recorder_option(const JavaVMOption** option, char* delimiter) { diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/jni/jfrJavaCall.cpp --- a/src/hotspot/share/jfr/jni/jfrJavaCall.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/jni/jfrJavaCall.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -184,7 +184,7 @@ } } -JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(0) { +JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(-1) { assert(result != NULL, "invariant"); } @@ -193,7 +193,7 @@ _klass(NULL), _name(NULL), _signature(NULL), - _array_length(0) { + _array_length(-1) { assert(result != NULL, "invariant"); if (klass_name != NULL) { set_klass(klass_name, CHECK); @@ -210,7 +210,7 @@ _klass(NULL), _name(NULL), _signature(NULL), - _array_length(0) { + _array_length(-1) { assert(result != NULL, "invariant"); if (klass != NULL) { set_klass(klass); diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp --- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -99,7 +99,6 @@ } void BFSClosure::process() { - process_root_set(); process_queue(); } @@ -138,7 +137,6 @@ // if we are processinig initial root set, don't add to queue if (_current_parent != NULL) { - assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant"); _edge_queue->add(_current_parent, reference); } @@ -151,20 +149,8 @@ void BFSClosure::add_chain(const oop* reference, const oop pointee) { assert(pointee != NULL, "invariant"); assert(NULL == pointee->mark(), "invariant"); - - const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2; - ResourceMark rm; - Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length); - size_t idx = 0; - chain[idx++] = Edge(NULL, reference); - // aggregate from breadth-first search - const Edge* current = _current_parent; - while (current != NULL) { - chain[idx++] = Edge(NULL, current->reference()); - current = current->parent(); - } - assert(length == idx, "invariant"); - _edge_store->add_chain(chain, length); + Edge leak_edge(_current_parent, reference); + _edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2); } void BFSClosure::dfs_fallback() { @@ -241,3 +227,12 @@ closure_impl(UnifiedOop::encode(ref), pointee); } } + +void BFSClosure::do_root(const oop* ref) { + assert(ref != NULL, "invariant"); + assert(is_aligned(ref, HeapWordSize), "invariant"); + assert(*ref != NULL, "invariant"); + if (!_edge_queue->is_full()) { + _edge_queue->add(NULL, ref); + } +} diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp --- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp Sat Aug 24 14:30:27 2019 +0200 @@ -26,7 +26,6 @@ #define SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP #include "memory/iterator.hpp" -#include "oops/oop.hpp" class BitSet; class Edge; @@ -65,6 +64,7 @@ public: BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits); void process(); + void do_root(const oop* ref); virtual void do_oop(oop* ref); virtual void do_oop(narrowOop* ref); diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp --- a/src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp Sat Aug 24 14:30:27 2019 +0200 @@ -47,7 +47,7 @@ BitMap::idx_t mark_obj(const HeapWord* addr) { const BitMap::idx_t bit = addr_to_bit(addr); - _bits.par_set_bit(bit); + _bits.set_bit(bit); return bit; } diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp --- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,14 +23,14 @@
 */

#include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
@@ -88,15 +88,15 @@
  // Mark root set, to avoid going sideways
  _max_depth = 1;
  _ignore_root_set = false;
-  DFSClosure dfs1;
-  RootSetClosure::process_roots(&dfs1);
+  DFSClosure dfs;
+  RootSetClosure<DFSClosure> rs(&dfs);
+  rs.process();

  // Depth-first search
  _max_depth = max_dfs_depth;
  _ignore_root_set = true;
  assert(_start_edge == NULL, "invariant");
-  DFSClosure dfs2;
-  RootSetClosure::process_roots(&dfs2);
+  rs.process();
}

void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
@@ -133,30 +133,29 @@
}

void DFSClosure::add_chain() {
-  const size_t length = _start_edge == NULL ? _depth + 1 :
-                        _start_edge->distance_to_root() + 1 + _depth + 1;
+  const size_t array_length = _depth + 2;
  ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
+  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, array_length);
  size_t idx = 0;

  // aggregate from depth-first search
  const DFSClosure* c = this;
  while (c != NULL) {
-    chain[idx++] = Edge(NULL, c->reference());
+    const size_t next = idx + 1;
+    chain[idx++] = Edge(&chain[next], c->reference());
    c = c->parent();
  }
-
-  assert(idx == _depth + 1, "invariant");
+  assert(_depth + 1 == idx, "invariant");
+  assert(array_length == idx + 1, "invariant");

  // aggregate from breadth-first search
-  const Edge* current = _start_edge;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
+  if (_start_edge != NULL) {
+    chain[idx++] = *_start_edge;
+  } else {
+    chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
  }
-  assert(idx == length, "invariant");
-  _edge_store->add_chain(chain, length);
+  _edge_store->put_chain(chain, idx + (_start_edge != NULL ?
_start_edge->distance_to_root() : 0)); } void DFSClosure::do_oop(oop* ref) { @@ -176,3 +175,11 @@ closure_impl(UnifiedOop::encode(ref), pointee); } } + +void DFSClosure::do_root(const oop* ref) { + assert(ref != NULL, "invariant"); + assert(is_aligned(ref, HeapWordSize), "invariant"); + const oop pointee = *ref; + assert(pointee != NULL, "invariant"); + closure_impl(ref, pointee); +} diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp --- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp Sat Aug 24 14:30:27 2019 +0200 @@ -26,7 +26,6 @@ #define SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP #include "memory/iterator.hpp" -#include "oops/oop.hpp" class BitSet; class Edge; @@ -34,7 +33,7 @@ class EdgeQueue; // Class responsible for iterating the heap depth-first -class DFSClosure: public BasicOopIterateClosure { +class DFSClosure : public BasicOopIterateClosure { private: static EdgeStore* _edge_store; static BitSet* _mark_bits; @@ -57,6 +56,7 @@ public: static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge); static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits); + void do_root(const oop* ref); virtual void do_oop(oop* ref); virtual void do_oop(narrowOop* ref); diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/edge.hpp --- a/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp Sat Aug 24 14:30:27 2019 +0200 @@ -29,7 +29,7 @@ #include "oops/oopsHierarchy.hpp" class Edge { - private: + protected: const Edge* _parent; const oop* _reference; public: diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp --- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,37 +27,17 @@ #include "jfr/leakprofiler/chains/edgeUtils.hpp" #include "oops/oop.inline.hpp" -RoutableEdge::RoutableEdge() : Edge() {} -RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), - _skip_edge(NULL), - _skip_length(0), - _processed(false) {} +StoredEdge::StoredEdge() : Edge() {} +StoredEdge::StoredEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {} -RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge), - _skip_edge(NULL), - _skip_length(0), - _processed(false) {} - -RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge), - _skip_edge(edge._skip_edge), - _skip_length(edge._skip_length), - _processed(edge._processed) {} +StoredEdge::StoredEdge(const Edge& edge) : Edge(edge), _gc_root_id(0), _skip_length(0) {} -void RoutableEdge::operator=(const RoutableEdge& edge) { - Edge::operator=(edge); - _skip_edge = edge._skip_edge; - _skip_length = edge._skip_length; - _processed = edge._processed; -} +StoredEdge::StoredEdge(const StoredEdge& edge) : Edge(edge), _gc_root_id(edge._gc_root_id), _skip_length(edge._skip_length) {} -size_t RoutableEdge::logical_distance_to_root() const { - size_t depth = 0; - const RoutableEdge* current = logical_parent(); - while (current != NULL) { - depth++; - current = current->logical_parent(); - } - return depth; +void StoredEdge::operator=(const StoredEdge& edge) { + Edge::operator=(edge); + _gc_root_id = edge._gc_root_id; + _skip_length = edge._skip_length; } traceid EdgeStore::_edge_id_counter = 0; @@ -69,79 +49,12 @@ EdgeStore::~EdgeStore() { assert(_edges != NULL, "invariant"); delete _edges; - _edges = NULL; -} - -const Edge* EdgeStore::get_edge(const Edge* edge) const { - assert(edge != NULL, "invariant"); - EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference()); - return entry != NULL ? 
entry->literal_addr() : NULL; -} - -const Edge* EdgeStore::put(const Edge* edge) { - assert(edge != NULL, "invariant"); - const RoutableEdge e = *edge; - assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant"); - EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference()); - return entry.literal_addr(); -} - -traceid EdgeStore::get_id(const Edge* edge) const { - assert(edge != NULL, "invariant"); - EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference()); - assert(entry != NULL, "invariant"); - return entry->id(); -} - -traceid EdgeStore::get_root_id(const Edge* edge) const { - assert(edge != NULL, "invariant"); - const Edge* root = EdgeUtils::root(*edge); - assert(root != NULL, "invariant"); - return get_id(root); -} - -void EdgeStore::add_chain(const Edge* chain, size_t length) { - assert(chain != NULL, "invariant"); - assert(length > 0, "invariant"); - - size_t bottom_index = length - 1; - const size_t top_index = 0; - - const Edge* stored_parent_edge = NULL; - - // determine level of shared ancestry - for (; bottom_index > top_index; --bottom_index) { - const Edge* stored_edge = get_edge(&chain[bottom_index]); - if (stored_edge != NULL) { - stored_parent_edge = stored_edge; - continue; - } - break; - } - - // insertion of new Edges - for (int i = (int)bottom_index; i >= (int)top_index; --i) { - Edge edge(stored_parent_edge, chain[i].reference()); - stored_parent_edge = put(&edge); - } - - const oop sample_object = stored_parent_edge->pointee(); - assert(sample_object != NULL, "invariant"); - assert(NULL == sample_object->mark(), "invariant"); - - // Install the "top" edge of the chain into the sample object mark oop. - // This associates the sample object with its navigable reference chain. - sample_object->set_mark(markOop(stored_parent_edge)); } bool EdgeStore::is_empty() const { return !_edges->has_entries(); } -size_t EdgeStore::number_of_entries() const { - return _edges->cardinality(); -} - void EdgeStore::assign_id(EdgeEntry* entry) { assert(entry != NULL, "invariant"); assert(entry->id() == 0, "invariant"); @@ -153,3 +66,259 @@ assert(entry->hash() == hash, "invariant"); return true; } + +void EdgeStore::unlink(EdgeEntry* entry) { + assert(entry != NULL, "invariant"); + // nothing +} + +#ifdef ASSERT +bool EdgeStore::contains(const oop* reference) const { + return get(reference) != NULL; +} +#endif + +StoredEdge* EdgeStore::get(const oop* reference) const { + assert(reference != NULL, "invariant"); + const StoredEdge e(NULL, reference); + EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference); + return entry != NULL ? 
entry->literal_addr() : NULL;
+}
+
+StoredEdge* EdgeStore::put(const oop* reference) {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
+  EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
+  return entry.literal_addr();
+}
+
+traceid EdgeStore::get_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  assert(entry != NULL, "invariant");
+  return entry->id();
+}
+
+traceid EdgeStore::gc_root_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  const traceid gc_root_id = static_cast<const StoredEdge*>(edge)->gc_root_id();
+  if (gc_root_id != 0) {
+    return gc_root_id;
+  }
+  // not cached
+  assert(edge != NULL, "invariant");
+  const Edge* const root = EdgeUtils::root(*edge);
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  return get_id(root);
+}
+
+static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_root, size_t* skip_length) {
+  assert(distance_to_root >= EdgeUtils::root_context, "invariant");
+  assert(*skip_length == 0, "invariant");
+  *skip_length = distance_to_root - (EdgeUtils::root_context - 1);
+  const Edge* const target = EdgeUtils::ancestor(**current, *skip_length);
+  assert(target != NULL, "invariant");
+  assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant");
+  return target;
+}
+
+bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert((*current)->distance_to_root() == distance_to_root, "invariant");
+
+  if (distance_to_root < EdgeUtils::root_context) {
+    // nothing to skip
+    return false;
+  }
+
+  size_t skip_length = 0;
+  const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length);
+  assert(skip_ancestor != NULL, "invariant");
+  (*previous)->set_skip_length(skip_length);
+
+  // lookup target
+  StoredEdge* stored_target = get(skip_ancestor->reference());
+  if (stored_target != NULL) {
+    (*previous)->set_parent(stored_target);
+    // linked to existing, complete
+    return true;
+  }
+
+  assert(stored_target == NULL, "invariant");
+  stored_target = put(skip_ancestor->reference());
+  assert(stored_target != NULL, "invariant");
+  (*previous)->set_parent(stored_target);
+  *previous = stored_target;
+  *current = skip_ancestor->parent();
+  return false;
+}
+
+static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) {
+  assert(current_stored != NULL, "invariant");
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  (*previous)->set_parent(current_stored);
+}
+
+static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) {
+  assert(edge != NULL, "invariant");
+  assert(distance != NULL, "invariant");
+  const StoredEdge* current = edge;
+  *distance = 1;
+  while (current != NULL && !current->is_skip_edge()) {
+    ++(*distance);
+    current = current->parent();
+  }
+  return current;
+}
+
+void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) {
+  assert(current_stored != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  size_t distance_to_skip_edge; // including the skip edge itself
+  const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge);
+  if (closest_skip_edge == NULL) {
+    // not finding a skip edge implies the chain terminates at a root
+    if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) {
+      link_edge(current_stored, previous);
+      return;
+    }
+    assert(current_stored->distance_to_root() == distance_to_skip_edge - 2, "invariant");
+    put_skip_edge(previous, reinterpret_cast<const Edge**>(&current_stored), distance_to_skip_edge - 2);
+    return;
+  }
+  assert(closest_skip_edge->is_skip_edge(), "invariant");
+  if (distance_to_skip_edge + previous_length <= EdgeUtils::leak_context) {
+    link_edge(current_stored, previous);
+    return;
+  }
+  // create a new skip edge with derived information from closest skip edge
+  (*previous)->set_skip_length(distance_to_skip_edge + closest_skip_edge->skip_length());
+  (*previous)->set_parent(closest_skip_edge->parent());
+}
+
+StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert(!contains((*current)->reference()), "invariant");
+  StoredEdge* const stored_edge = put((*current)->reference());
+  assert(stored_edge != NULL, "invariant");
+  link_edge(stored_edge, previous);
+  return stored_edge;
+}
+
+bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) {
+  assert(*previous != NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  size_t depth = 1;
+  while (*current != NULL && depth < limit) {
+    StoredEdge* stored_edge = get((*current)->reference());
+    if (stored_edge != NULL) {
+      link_with_existing_chain(stored_edge, previous, depth);
+      return true;
+    }
+    stored_edge = link_new_edge(previous, current);
+    assert((*previous)->parent() != NULL, "invariant");
+    *previous = stored_edge;
+    *current = (*current)->parent();
+    ++depth;
+  }
+  return NULL == *current;
+}
+
+// Install the immediate edge into the mark word of the leak candidate object
+StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
+  assert(edge != NULL, "invariant");
+  assert(!contains(edge->reference()), "invariant");
+  StoredEdge* const leak_context_edge = put(edge->reference());
+  oop sample_object = edge->pointee();
+  assert(sample_object != NULL, "invariant");
+  assert(NULL == sample_object->mark(), "invariant");
+  sample_object->set_mark(markOop(leak_context_edge));
+  return leak_context_edge;
+}
+
+/*
+ * The purpose of put_chain() is to reify the edge sequence
+ * discovered during heap traversal with a normalized logical copy.
+ * This copy consists of two sub-sequences and a connecting link (skip edge).
+ *
+ * "current" can be thought of as the cursor (search) edge; it is not in the edge store.
+ * "previous" is always an edge in the edge store.
+ * The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
+ */
+void EdgeStore::put_chain(const Edge* chain, size_t length) {
+  assert(chain != NULL, "invariant");
+  assert(chain->distance_to_root() + 1 == length, "invariant");
+  StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain);
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->parent() == NULL, "invariant");
+
+  if (1 == length) {
+    return;
+  }
+
+  const Edge* current = chain->parent();
+  assert(current != NULL, "invariant");
+  StoredEdge* previous = leak_context_edge;
+
+  // a leak context is the sequence of (limited) edges reachable from the leak candidate
+  if (put_edges(&previous, &current, EdgeUtils::leak_context)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+    return;
+  }
+
+  const size_t distance_to_root = length > EdgeUtils::leak_context ? length - 1 - EdgeUtils::leak_context : length - 1;
+  assert(current->distance_to_root() == distance_to_root, "invariant");
+
+  // a skip edge is the logical link
+  // connecting the leak context sequence with the root context sequence
+  if (put_skip_edge(&previous, &current, distance_to_root)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    assert(previous->is_skip_edge(), "invariant");
+    assert(previous->parent() != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent()));
+    return;
+  }
+
+  assert(current->distance_to_root() < EdgeUtils::root_context, "invariant");
+
+  // a root context is the sequence of (limited) edges reachable from the root
+  put_edges(&previous, &current, EdgeUtils::root_context);
+  assert(previous != NULL, "invariant");
+  put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+}
+
+void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(root != NULL, "invariant");
+  store_gc_root_id_in_leak_context_edge(leak_context_edge, root);
+  assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant");
+}
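// Editorial aside, not part of the changeset: a minimal sketch of the capping
// arithmetic that put_chain() implements, assuming only chain lengths matter.
// The constants mirror EdgeUtils::leak_context, EdgeUtils::root_context and
// EdgeUtils::max_ref_chain_depth; the function name and form are hypothetical.
static size_t sketch_stored_edge_count(size_t discovered_chain_length) {
  const size_t leak_context = 100;  // EdgeUtils::leak_context
  const size_t root_context = 100;  // EdgeUtils::root_context
  if (discovered_chain_length <= leak_context + root_context) {
    // short chains are stored in full, no skip edge is needed
    return discovered_chain_length;
  }
  // longer chains materialize only the two context sub-sequences; the elided
  // middle is encoded as a skip_length on the edge that bridges the contexts
  return leak_context + root_context;
}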
+
+// To avoid another traversal to resolve the root edge id later,
+// cache it in the immediate leak context edge for fast retrieval.
+void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->gc_root_id() == 0, "invariant");
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  assert(root->distance_to_root() == 0, "invariant");
+  const StoredEdge* const stored_root = static_cast<const StoredEdge*>(root);
+  traceid root_id = stored_root->gc_root_id();
+  if (root_id == 0) {
+    root_id = get_id(root);
+    stored_root->set_gc_root_id(root_id);
+  }
+  assert(root_id != 0, "invariant");
+  leak_context_edge->set_gc_root_id(root_id);
+  assert(leak_context_edge->gc_root_id() == stored_root->gc_root_id(), "invariant");
+}
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,64 +25,40 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP

+#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
-#include "jfr/leakprofiler/chains/edge.hpp"
#include "memory/allocation.hpp"

typedef u8 traceid;

-class RoutableEdge : public Edge {
+class StoredEdge : public Edge {
 private:
-  mutable const RoutableEdge* _skip_edge;
-  mutable size_t _skip_length;
-  mutable bool _processed;
+  mutable traceid _gc_root_id;
+  size_t _skip_length;
 public:
-  RoutableEdge();
-  RoutableEdge(const Edge* parent, const oop* reference);
-  RoutableEdge(const Edge& edge);
-  RoutableEdge(const RoutableEdge& edge);
-  void operator=(const RoutableEdge& edge);
-
-  const RoutableEdge* skip_edge() const { return _skip_edge; }
-  size_t skip_length() const { return _skip_length; }
+  StoredEdge();
+  StoredEdge(const Edge* parent, const oop* reference);
+  StoredEdge(const Edge& edge);
+  StoredEdge(const StoredEdge& edge);
+  void operator=(const StoredEdge& edge);

-  bool is_skip_edge() const { return _skip_edge != NULL; }
-  bool processed() const { return _processed; }
-  bool is_sentinel() const {
-    return _skip_edge == NULL && _skip_length == 1;
-  }
-
-  void set_skip_edge(const RoutableEdge* edge) const {
-    assert(!is_skip_edge(), "invariant");
-    assert(edge != this, "invariant");
-    _skip_edge = edge;
-  }
+  traceid gc_root_id() const { return _gc_root_id; }
+  void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }

-  void set_skip_length(size_t length) const {
-    _skip_length = length;
-  }
-
-  void set_processed() const {
-    assert(!_processed, "invariant");
-    _processed = true;
-  }
+  bool is_skip_edge() const { return _skip_length != 0; }
+  size_t skip_length() const { return _skip_length; }
+  void set_skip_length(size_t length) { _skip_length = length; }

-  // true navigation according to physical tree representation
-  const RoutableEdge* physical_parent() const {
-    return static_cast<const RoutableEdge*>(parent());
-  }
+  void set_parent(const Edge* edge) { this->_parent = edge; }

-  // logical navigation taking skip levels into account
-  const RoutableEdge* logical_parent() const {
-    return is_skip_edge() ? skip_edge() : physical_parent();
+  StoredEdge* parent() const {
+    return const_cast<StoredEdge*>(static_cast<const StoredEdge*>(Edge::parent()));
  }
-
-  size_t logical_distance_to_root() const;
};

class EdgeStore : public CHeapObj<mtTracing> {
-  typedef HashTableHost<RoutableEdge, traceid, JfrHashtableEntry, EdgeStore> EdgeHashTable;
+  typedef HashTableHost<StoredEdge, traceid, JfrHashtableEntry, EdgeStore> EdgeHashTable;
  typedef EdgeHashTable::HashEntry EdgeEntry;
  template <typename, typename, template <typename, typename> class, typename, size_t>
  friend class HashTableHost;
+  friend class EventEmitter;
+  friend class ObjectSampleWriter;
+  friend class ObjectSampleCheckpoint;
 private:
  static traceid _edge_id_counter;
  EdgeHashTable* _edges;
@@ -97,23 +76,33 @@
  // Hash table callbacks
  void assign_id(EdgeEntry* entry);
  bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
+  void unlink(EdgeEntry* entry);

-  const Edge* get_edge(const Edge* edge) const;
-  const Edge* put(const Edge* edge);
+  StoredEdge* get(const oop* reference) const;
+  StoredEdge* put(const oop* reference);
+  traceid gc_root_id(const Edge* edge) const;
+
+  bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
+  bool put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root);
+  void put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const;
+
+  StoredEdge* associate_leak_context_with_candidate(const Edge* edge);
+  void store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const;
+  StoredEdge* link_new_edge(StoredEdge** previous, const Edge** current);
+  void link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length);
+
+  template <typename T>
+  void iterate(T& functor) const { _edges->iterate_value(functor); }
+
+  DEBUG_ONLY(bool contains(const oop* reference) const;)
 public:
  EdgeStore();
  ~EdgeStore();

-  void add_chain(const Edge* chain, size_t length);
  bool is_empty() const;
-  size_t number_of_entries() const;
-
  traceid get_id(const Edge* edge) const;
-  traceid get_root_id(const Edge* edge) const;
-
-  template <typename T>
-  void iterate_edges(T& functor) const { _edges->iterate_value(functor); }
+  void put_chain(const Edge* chain, size_t length);
};

#endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -38,11 +38,7 @@ return (const Edge*)edge.pointee()->mark() == &edge; } -bool EdgeUtils::is_root(const Edge& edge) { - return edge.is_root(); -} - -static int field_offset(const Edge& edge) { +static int field_offset(const StoredEdge& edge) { assert(!edge.is_root(), "invariant"); const oop ref_owner = edge.reference_owner(); assert(ref_owner != NULL, "invariant"); @@ -56,7 +52,7 @@ return offset; } -static const InstanceKlass* field_type(const Edge& edge) { +static const InstanceKlass* field_type(const StoredEdge& edge) { assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant"); return (const InstanceKlass*)edge.reference_owner_klass(); } @@ -138,175 +134,18 @@ current = parent; parent = current->parent(); } - return current; -} - -// The number of references associated with the leak node; -// can be viewed as the leak node "context". -// Used to provide leak context for a "capped/skipped" reference chain. -static const size_t leak_context = 100; - -// The number of references associated with the root node; -// can be viewed as the root node "context". -// Used to provide root context for a "capped/skipped" reference chain. -static const size_t root_context = 100; - -// A limit on the reference chain depth to be serialized, -static const size_t max_ref_chain_depth = leak_context + root_context; - -const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) { - const RoutableEdge* current = &edge; - const RoutableEdge* parent = current->physical_parent(); - size_t seek = 0; - while (parent != NULL && seek != skip_length) { - seek++; - current = parent; - parent = parent->physical_parent(); - } - return current; -} - -#ifdef ASSERT -static void validate_skip_target(const RoutableEdge* skip_target) { - assert(skip_target != NULL, "invariant"); - assert(skip_target->distance_to_root() + 1 == root_context, "invariant"); - assert(skip_target->is_sentinel(), "invariant"); -} - -static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) { - assert(new_skip_edge != NULL, "invariant"); - assert(new_skip_edge->is_skip_edge(), "invariant"); - if (last_skip_edge != NULL) { - const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment); - validate_skip_target(target->logical_parent()); - return; - } - assert(last_skip_edge == NULL, "invariant"); - // only one level of logical indirection - validate_skip_target(new_skip_edge->logical_parent()); -} -#endif // ASSERT - -static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) { - assert(new_skip_edge != NULL, "invariant"); - assert(!new_skip_edge->is_skip_edge(), "invariant"); - assert(!new_skip_edge->processed(), "invariant"); - const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance); - assert(skip_target != NULL, "invariant"); - new_skip_edge->set_skip_edge(skip_target); - new_skip_edge->set_skip_length(skip_target_distance); - assert(new_skip_edge->is_skip_edge(), "invariant"); - assert(new_skip_edge->logical_parent() == skip_target, "invariant"); -} - -static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) { - assert(distance == 0, "invariant"); - const RoutableEdge* current = &edge; - while (current != NULL) { - if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) { - return current; - } - current = current->physical_parent(); - 
++distance; - } + assert(current != NULL, "invariant"); return current; } -static void collapse_overlapping_chain(const RoutableEdge& edge, - const RoutableEdge* first_processed_edge, - size_t first_processed_distance) { - assert(first_processed_edge != NULL, "invariant"); - // first_processed_edge is already processed / written - assert(first_processed_edge->processed(), "invariant"); - assert(first_processed_distance + 1 <= leak_context, "invariant"); - - // from this first processed edge, attempt to fetch the last skip edge - size_t last_skip_edge_distance = 0; - const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance); - const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1; - - if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) { - // complete chain can be accommodated without modification - return; - } - - // backtrack one edge from existing processed edge - const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1); - assert(new_skip_edge != NULL, "invariant"); - assert(!new_skip_edge->processed(), "invariant"); - assert(new_skip_edge->parent() == first_processed_edge, "invariant"); - - size_t adjustment = 0; - if (last_skip_edge != NULL) { - assert(leak_context - 1 > first_processed_distance - 1, "invariant"); - adjustment = leak_context - first_processed_distance - 1; - assert(last_skip_edge_distance + 1 > adjustment, "invariant"); - install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment); - } else { - install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context); - new_skip_edge->logical_parent()->set_skip_length(1); // sentinel +const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) { + const Edge* current = &edge; + const Edge* parent = current->parent(); + size_t seek = 0; + while (parent != NULL && seek != distance) { + seek++; + current = parent; + parent = parent->parent(); } - - DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);) -} - -static void collapse_non_overlapping_chain(const RoutableEdge& edge, - const RoutableEdge* first_processed_edge, - size_t first_processed_distance) { - assert(first_processed_edge != NULL, "invariant"); - assert(!first_processed_edge->processed(), "invariant"); - // this implies that the first "processed" edge is the leak context relative "leaf" - assert(first_processed_distance + 1 == leak_context, "invariant"); - - const size_t distance_to_root = edge.distance_to_root(); - if (distance_to_root + 1 <= max_ref_chain_depth) { - // complete chain can be accommodated without constructing a skip edge - return; - } - - install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context); - first_processed_edge->logical_parent()->set_skip_length(1); // sentinel - - DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);) -} - -static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) { - assert(distance == 0, "invariant"); - const RoutableEdge* current = &edge; - while (current != NULL && distance < leak_context - 1) { - if (current->processed()) { - return current; - } - current = current->physical_parent(); - ++distance; - } - assert(distance <= leak_context - 1, "invariant"); return current; } - -/* - * Some vocabulary: - * ----------- - * "Context" is an interval in the chain, it is associcated with an edge and 
it signifies a number of connected edges. - * "Processed / written" means an edge that has already been serialized. - * "Skip edge" is an edge that contains additional information for logical routing purposes. - * "Skip target" is an edge used as a destination for a skip edge - */ -void EdgeUtils::collapse_chain(const RoutableEdge& edge) { - assert(is_leak_edge(edge), "invariant"); - - // attempt to locate an already processed edge inside current leak context (if any) - size_t first_processed_distance = 0; - const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance); - if (first_processed_edge == NULL) { - return; - } - - if (first_processed_edge->processed()) { - collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance); - } else { - collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance); - } - - assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant"); -} diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.hpp --- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.hpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.hpp Sat Aug 24 14:30:27 2019 +0200 @@ -28,15 +28,17 @@ #include "memory/allocation.hpp" class Edge; -class RoutableEdge; class Symbol; class EdgeUtils : public AllStatic { public: - static bool is_leak_edge(const Edge& edge); + static const size_t leak_context = 100; + static const size_t root_context = 100; + static const size_t max_ref_chain_depth = leak_context + root_context; + static bool is_leak_edge(const Edge& edge); static const Edge* root(const Edge& edge); - static bool is_root(const Edge& edge); + static const Edge* ancestor(const Edge& edge, size_t distance); static bool is_array_element(const Edge& edge); static int array_index(const Edge& edge); @@ -44,8 +46,6 @@ static const Symbol* field_name_symbol(const Edge& edge); static jshort field_modifiers(const Edge& edge); - - static void collapse_chain(const RoutableEdge& edge); }; #endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGEUTILS_HPP diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+#include "logging/log.hpp"
+#include "memory/universe.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
+  _sampler(sampler), _edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all) {}
+
+/* The EdgeQueue is backed by directly managed virtual memory.
+ * We will attempt to dimension an initial reservation
+ * in proportion to the size of the heap (represented by heap_region).
+ * Initial memory reservation: 5% of the heap OR at least 32 Mb
+ * Commit ratio: 1 : 10 (subject to allocation granularities)
+ */
+static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
+  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
+  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
+  return memory_reservation_bytes;
+}
+
+static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
+  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
+  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
+  return memory_commit_block_size_bytes;
+}
+
+static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
+  log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
+  log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
+  log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
+  if (edge_queue.reserved_size() > 0) {
+    log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
+      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
+  }
+}
+
+void PathToGcRootsOperation::doit() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_cutoff_ticks > 0, "invariant");
+
+  // The bitset used for marking is dimensioned as a function of the heap size
+  const MemRegion heap_region = Universe::heap()->reserved_region();
+  BitSet mark_bits(heap_region);
+
+  // The edge queue is dimensioned as a fraction of the heap size
+  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
+  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
+
+  // The initialize() routines will attempt to reserve and allocate backing storage memory.
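  // Editorial aside, a worked example with hypothetical numbers: for a 64 GB
  // heap, edge_queue_memory_reservation() above yields
  // MAX2(64 GB / 20, 32 MB) = 3.2 GB of reserved address space, and
  // edge_queue_memory_commit_size() then commits it in blocks of
  // 3.2 GB / 10 = roughly 328 MB, the 1 : 10 commit-to-reservation ratio.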
+  // Failure to accommodate will render root chain processing impossible.
+  // As a fallback on failure, just write out the existing samples, flat, without chains.
+  if (!(mark_bits.initialize() && edge_queue.initialize())) {
+    log_warning(jfr)("Unable to allocate memory for root chain processing");
+    return;
+  }
+
+  // Save the original markWord for the potential leak objects,
+  // to be restored on function exit
+  ObjectSampleMarker marker;
+  if (ObjectSampleCheckpoint::save_mark_words(_sampler, marker, _emit_all) == 0) {
+    // no valid samples to process
+    return;
+  }
+
+  // Necessary condition for attempting a root set iteration
+  Universe::heap()->ensure_parsability(false);
+
+  BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
+  RootSetClosure<BFSClosure> roots(&bfs);
+
+  GranularTimer::start(_cutoff_ticks, 1000000);
+  roots.process();
+  if (edge_queue.is_full()) {
+    // Pathological case where roots don't fit in queue
+    // Do a depth-first search, but mark roots first
+    // to avoid walking sideways over roots
+    DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
+  } else {
+    bfs.process();
+  }
+  GranularTimer::stop();
+  log_edge_queue_summary(edge_queue);
+
+  // Emit old objects including their reference chains as events
+  EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
+  emitter.write_events(_sampler, _edge_store, _emit_all);
+}
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
+
+class EdgeStore;
+class ObjectSampler;
+
+// Safepoint operation for finding paths to gc roots
+class PathToGcRootsOperation : public OldObjectVMOperation {
+ private:
+  ObjectSampler* _sampler;
+  EdgeStore* const _edge_store;
+  const int64_t _cutoff_ticks;
+  const bool _emit_all;
+
+ public:
+  PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all);
+  virtual void doit();
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp
--- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,27 +28,26 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/strongRootsScope.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
-#include "jfr/leakprofiler/utilities/saveRestore.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
+#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "services/management.hpp"
#include "utilities/align.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif

-RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
-  _edge_queue(edge_queue) {
-}
+template <typename Delegate>
+RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegate) {}

-void RootSetClosure::do_oop(oop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(oop* ref) {
  assert(ref != NULL, "invariant");
  // We discard unaligned root references because
  // our reference tagging scheme will use
@@ -62,50 +61,39 @@
  }
  assert(is_aligned(ref, HeapWordSize), "invariant");

-  const oop pointee = *ref;
-  if (pointee != NULL) {
-    closure_impl(ref, pointee);
+  if (*ref != NULL) {
+    _delegate->do_root(ref);
  }
}

-void RootSetClosure::do_oop(narrowOop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
  assert(ref != NULL, "invariant");
  assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
  const oop pointee = RawAccess<>::oop_load(ref);
  if (pointee != NULL) {
-    closure_impl(UnifiedOop::encode(ref), pointee);
-  }
-}
-
-void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
-  if (!_edge_queue->is_full()) {
-    _edge_queue->add(NULL, reference);
+    _delegate->do_root(UnifiedOop::encode(ref));
  }
}

-void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
-  RootSetClosure rs(edge_queue);
-  process_roots(&rs);
+class RootSetClosureMarkScope : public MarkScope {};
+
+template <typename Delegate>
+void RootSetClosure<Delegate>::process() {
+  RootSetClosureMarkScope mark_scope;
+  CLDToOopClosure cldt_closure(this, ClassLoaderData::_claim_none);
+  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
+  CodeBlobToOopClosure blobs(this, false);
+  Threads::oops_do(this, &blobs);
+  ObjectSynchronizer::oops_do(this);
+  Universe::oops_do(this);
+  JNIHandles::oops_do(this);
+  JvmtiExport::oops_do(this);
+  SystemDictionary::oops_do(this);
+  Management::oops_do(this);
+  StringTable::oops_do(this);
+  AOTLoader::oops_do(this);
}

-class RootSetClosureMarkScope : public MarkScope {
-};
-
-void RootSetClosure::process_roots(OopClosure* closure) {
-  SaveRestoreCLDClaimBits save_restore_cld_claim_bits;
-  RootSetClosureMarkScope mark_scope;
-
-  CLDToOopClosure cldt_closure(closure, ClassLoaderData::_claim_strong);
-  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
-  CodeBlobToOopClosure blobs(closure, false);
-  Threads::oops_do(closure, &blobs);
-  ObjectSynchronizer::oops_do(closure);
-  Universe::oops_do(closure);
-  JNIHandles::oops_do(closure);
-  JvmtiExport::oops_do(closure);
-  SystemDictionary::oops_do(closure);
-  Management::oops_do(closure);
-  StringTable::oops_do(closure);
-  AOTLoader::oops_do(closure);
-  JVMCI_ONLY(JVMCI::oops_do(closure);)
-}
+template class RootSetClosure<BFSClosure>;
+template class RootSetClosure<DFSClosure>;
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.hpp
--- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -26,18 +26,14 @@
#define SHARE_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP

#include "memory/iterator.hpp"
-#include "oops/oop.hpp"

-class EdgeQueue;
-
+template <typename Delegate>
class RootSetClosure: public BasicOopIterateClosure {
 private:
-  RootSetClosure(EdgeQueue* edge_queue);
-  EdgeQueue* _edge_queue;
-  void closure_impl(const oop* reference, const oop pointee);
+  Delegate* const _delegate;
 public:
-  static void add_to_queue(EdgeQueue* edge_queue);
-  static void process_roots(OopClosure* closure);
+  RootSetClosure(Delegate* delegate);
+  void process();

  virtual void do_oop(oop* reference);
  virtual void do_oop(narrowOop* reference);
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "jfr/jfrEvents.hpp" +#include "jfr/leakprofiler/chains/edgeStore.hpp" +#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp" +#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp" +#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp" +#include "jfr/leakprofiler/sampling/objectSample.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "logging/log.hpp" +#include "memory/resourceArea.hpp" +#include "oops/markOop.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/thread.inline.hpp" +#include "runtime/vmThread.hpp" + +EventEmitter::EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time) : + _start_time(start_time), + _end_time(end_time), + _thread(Thread::current()), + _jfr_thread_local(_thread->jfr_thread_local()), + _thread_id(_thread->jfr_thread_local()->thread_id()) {} + +EventEmitter::~EventEmitter() { + // restore / reset thread local stack trace and thread id + _jfr_thread_local->set_thread_id(_thread_id); + _jfr_thread_local->clear_cached_stack_trace(); +} + +void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all) { + assert(sampler != NULL, "invariant"); + + ResourceMark rm; + EdgeStore edge_store; + if (cutoff_ticks <= 0) { + // no reference chains + JfrTicks time_stamp = JfrTicks::now(); + EventEmitter emitter(time_stamp, time_stamp); + emitter.write_events(sampler, &edge_store, emit_all); + return; + } + // events emitted with reference chains require a safepoint operation + PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all); + VMThread::execute(&op); +} + +size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) { + assert(_thread == Thread::current(), "invariant"); + assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant"); + assert(object_sampler != NULL, "invariant"); + assert(edge_store != NULL, "invariant"); + + const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value(); + size_t count = 0; + + const ObjectSample* current = object_sampler->first(); + while (current != NULL) { + ObjectSample* prev = current->prev(); + if (current->is_alive_and_older_than(last_sweep)) { + write_event(current, edge_store); + ++count; + } + current = prev; + } + + if (count > 0) { + // serialize associated checkpoints and potential chains + ObjectSampleCheckpoint::write(object_sampler, edge_store, emit_all, _thread); + } + return count; +} + +static int array_size(const oop object) { + assert(object != NULL, "invariant"); + if (object->is_array()) { + return arrayOop(object)->length(); + } + return min_jint; +} + +void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) { + assert(sample != NULL, "invariant"); + assert(!sample->is_dead(), "invariant"); + assert(edge_store != NULL, "invariant"); + assert(_jfr_thread_local != NULL, "invariant"); + + const oop* object_addr = sample->object_addr(); + traceid gc_root_id = 0; + const Edge* edge = NULL; + if (SafepointSynchronize::is_at_safepoint()) { + edge = (const Edge*)(*object_addr)->mark(); + } + if (edge == NULL) { + // In order to dump out a representation of the event + // even though it was not reachable / too long to reach, + // we need to register a top level edge for this object. 
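  // Editorial aside: a sample that was never reached by the path-to-gc-roots
  // traversal carries no StoredEdge in its mark word, so a fresh, parentless
  // edge is registered below purely to obtain an object id for serialization;
  // gc_root_id then remains 0 and no reference chain is emitted for it.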
+ edge = edge_store->put(object_addr); + } else { + gc_root_id = edge_store->gc_root_id(edge); + } + + assert(edge != NULL, "invariant"); + const traceid object_id = edge_store->get_id(edge); + assert(object_id != 0, "invariant"); + + EventOldObjectSample e(UNTIMED); + e.set_starttime(_start_time); + e.set_endtime(_end_time); + e.set_allocationTime(sample->allocation_time()); + e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc()); + e.set_object(object_id); + e.set_arrayElements(array_size(edge->pointee())); + e.set_root(gc_root_id); + + // Temporarily assigning both the stack trace id and thread id + // onto the thread local data structure of the emitter thread (for the duration + // of the commit() call). This trick provides a means to override + // the event generation mechanism by injecting externally provided id's. + // At this particular location, it allows us to emit an old object event + // supplying information from where the actual sampling occurred. + _jfr_thread_local->set_cached_stack_trace_id(sample->stack_trace_id()); + assert(sample->has_thread(), "invariant"); + _jfr_thread_local->set_thread_id(sample->thread_id()); + e.commit(); +} diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp Sat Aug 24 14:30:27 2019 +0200 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+
+typedef u8 traceid;
+
+class EdgeStore;
+class JfrThreadLocal;
+class ObjectSample;
+class ObjectSampler;
+class Thread;
+
+class EventEmitter : public CHeapObj<mtTracing> {
+  friend class LeakProfiler;
+  friend class PathToGcRootsOperation;
+ private:
+  const JfrTicks& _start_time;
+  const JfrTicks& _end_time;
+  Thread* _thread;
+  JfrThreadLocal* _jfr_thread_local;
+  traceid _thread_id;
+
+  EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time);
+  ~EventEmitter();
+
+  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
+  size_t write_events(ObjectSampler* sampler, EdgeStore* store, bool emit_all);
+
+  static void emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all);
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp Sat Aug 24 14:30:27 2019 +0200
@@ -24,10 +24,6 @@
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
-#include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
-#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
 #include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
@@ -37,12 +33,129 @@
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/metadata/jfrSerializer.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.inline.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/growableArray.hpp"
+
+static bool predicate(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  bool found = false;
+  set->find_sorted<traceid, compare_traceid>(id, found);
+  return found;
+}
+
+static bool mutable_predicate(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  bool found = false;
+  const int location = set->find_sorted<traceid, compare_traceid>(id, found);
+  if (!found) {
+    set->insert_before(location, id);
+  }
+  return found;
+}
+
+static bool add(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  return mutable_predicate(set, id);
+}
+
+const int initial_array_size = 256;
+
+template <typename T>
+static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
+  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
+}
+
+template <typename T>
+static GrowableArray<T>* resource_allocate_array(int size = initial_array_size) {
+  return new GrowableArray<T>(size);
+}
+
+static void sort_array(GrowableArray<traceid>* ar) {
+  assert(ar != NULL, "invariant");
+  ar->sort(sort_traceid);
+}
+
+static GrowableArray<traceid>* unloaded_thread_id_set = NULL;
+
+class ThreadIdExclusiveAccess : public StackObj {
+ private:
+  static Semaphore _mutex_semaphore;
+ public:
+  ThreadIdExclusiveAccess() { _mutex_semaphore.wait(); }
+  ~ThreadIdExclusiveAccess() { _mutex_semaphore.signal(); }
+};
+
+Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);
 
-template <typename SampleProcessor>
-static void do_samples(ObjectSample* sample, const ObjectSample* const end, SampleProcessor& processor) {
+static void add_to_unloaded_thread_set(traceid tid) {
+  ThreadIdExclusiveAccess lock;
+  if (unloaded_thread_id_set == NULL) {
+    unloaded_thread_id_set = c_heap_allocate_array<traceid>();
+  }
+  add(unloaded_thread_id_set, tid);
+}
+
+static bool has_thread_exited(traceid tid) {
+  assert(tid != 0, "invariant");
+  return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
+}
+
+static GrowableArray<traceid>* unloaded_set = NULL;
+
+static void sort_unloaded_set() {
+  if (unloaded_set != NULL) {
+    sort_array(unloaded_set);
+  }
+}
+
+static void add_to_unloaded_set(traceid klass_id) {
+  if (unloaded_set == NULL) {
+    unloaded_set = c_heap_allocate_array<traceid>();
+  }
+  unloaded_set->append(klass_id);
+}
+
+void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
+  assert(k != NULL, "invariant");
+  add_to_unloaded_set(TRACE_ID(k));
+}
+
+static bool is_klass_unloaded(traceid klass_id) {
+  return unloaded_set != NULL && predicate(unloaded_set, klass_id);
+}
+
+static GrowableArray<traceid>* id_set = NULL;
+static GrowableArray<traceid>* stack_trace_id_set = NULL;
+
+static bool is_processed(traceid id) {
+  assert(id != 0, "invariant");
+  assert(id_set != NULL, "invariant");
+  return mutable_predicate(id_set, id);
+}
+
+static bool is_processed_or_unloaded(traceid klass_id) {
+  assert(klass_id != 0, "invariant");
+  return is_processed(klass_id) || is_klass_unloaded(klass_id);
+}
+
+static bool should_process(traceid klass_id) {
+  return klass_id != 0 && !is_processed_or_unloaded(klass_id);
+}
+
+static bool is_stack_trace_processed(traceid stack_trace_id) {
+  assert(stack_trace_id != 0, "invariant");
+  assert(stack_trace_id_set != NULL, "invariant");
+  return mutable_predicate(stack_trace_id_set, stack_trace_id);
+}
+
+template <typename Processor>
+static void do_samples(ObjectSample* sample, const ObjectSample* const end, Processor& processor) {
   assert(sample != NULL, "invariant");
   while (sample != end) {
     processor.sample_do(sample);
@@ -50,6 +163,298 @@
   }
 }
 
+template <typename Processor>
+static void iterate_samples(Processor& processor, bool all = false, bool update_last_resolved = false) {
+  ObjectSampler* const sampler = ObjectSampler::sampler();
+  assert(sampler != NULL, "invariant");
+  ObjectSample* const last = sampler->last();
+  assert(last != NULL, "invariant");
+  do_samples(last, all ? 
NULL : sampler->last_resolved(), processor); + if (update_last_resolved) { + sampler->set_last_resolved(last); + } +} + +void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) { + assert(jt != NULL, "invariant"); + if (LeakProfiler::is_running()) { + add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id()); + } +} + +class CheckpointInstall { + private: + const JfrCheckpointBlobHandle& _cp; + public: + CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {} + void sample_do(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (!sample->is_dead()) { + sample->set_klass_checkpoint(_cp); + } + } +}; + +static void install_blob(JfrCheckpointWriter& writer) { + assert(writer.has_data(), "invariant"); + const JfrCheckpointBlobHandle h_cp = writer.copy(); + CheckpointInstall install(h_cp); + iterate_samples(install, true, false); +} + +void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + assert(LeakProfiler::is_running(), "invariant"); + if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) { + install_blob(writer); + } +} + +class ObjectResolver { + public: + ObjectResolver() {} + void sample_do(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + const traceid klass_id = sample->_klass_id; + if (klass_id != 0 || sample->is_dead() || is_klass_unloaded(klass_id)) { + return; + } + sample->_klass_id = JfrTraceId::use(sample->klass()); + } +}; + +void ObjectSampleCheckpoint::resolve_sampled_objects() { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + assert(LeakProfiler::is_running(), "invariant"); + if (ObjectSampler::sampler()->last() == NULL) { + return; + } + ObjectResolver resolver; + iterate_samples(resolver, false, true); +} + +class SampleMark { + private: + ObjectSampleMarker& _marker; + jlong _last_sweep; + int _count; + public: + SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), _last_sweep(last_sweep), _count(0) {} + void sample_do(ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (sample->is_alive_and_older_than(_last_sweep)) { + _marker.mark(sample->object()); + ++_count; + } + } + int count() const { + return _count; + } +}; + +int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) { + assert(sampler != NULL, "invariant"); + if (sampler->last() == NULL) { + return 0; + } + SampleMark mark(marker, emit_all ? 
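max_jlong : sampler->last_sweep().value());
+  // emit_all (used when the whole sample set is dumped) makes every live sample
+  // eligible via max_jlong; otherwise only samples older than the last sweep
+  // are marked, matching the is_alive_and_older_than() check in
+  // SampleMark::sample_do() above.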
+  iterate_samples(mark, true, false);
+  return mark.count();
+}
+
+void ObjectSampleCheckpoint::tag(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  const traceid klass_id = sample->_klass_id;
+  if (should_process(sample->_klass_id)) {
+    JfrTraceId::use(sample->klass());
+  }
+}
+
+#ifdef ASSERT
+static traceid get_klass_id(const Klass* k) {
+  assert(k != NULL, "invariant");
+  return TRACE_ID(k);
+}
+#endif
+
+static traceid get_klass_id(traceid method_id) {
+  assert(method_id != 0, "invariant");
+  return method_id >> TRACE_ID_SHIFT;
+}
+
+static int get_method_id_num(traceid method_id) {
+  return (int)(method_id & METHOD_ID_NUM_MASK);
+}
+
+static Method* lookup_method_in_klasses(Klass* klass, int orig_method_id_num) {
+  assert(klass != NULL, "invariant");
+  assert(!is_klass_unloaded(get_klass_id(klass)), "invariant");
+  while (klass != NULL) {
+    if (klass->is_instance_klass()) {
+      Method* const m = InstanceKlass::cast(klass)->method_with_orig_idnum(orig_method_id_num);
+      if (m != NULL) {
+        return m;
+      }
+    }
+    klass = klass->super();
+  }
+  return NULL;
+}
+
+static Method* lookup_method_in_interfaces(Klass* klass, int orig_method_id_num) {
+  assert(klass != NULL, "invariant");
+  const Array<InstanceKlass*>* const all_ifs = InstanceKlass::cast(klass)->transitive_interfaces();
+  const int num_ifs = all_ifs->length();
+  for (int i = 0; i < num_ifs; i++) {
+    InstanceKlass* const ik = all_ifs->at(i);
+    Method* const m = ik->method_with_orig_idnum(orig_method_id_num);
+    if (m != NULL) {
+      return m;
+    }
+  }
+  return NULL;
+}
+
+static Method* lookup_method(Klass* klass, int orig_method_id_num) {
+  Method* m = lookup_method_in_klasses(klass, orig_method_id_num);
+  if (m == NULL) {
+    m = lookup_method_in_interfaces(klass, orig_method_id_num);
+  }
+  assert(m != NULL, "invariant");
+  return m;
+}
+
+static void write_stack_trace(traceid id, bool reached_root, u4 nr_of_frames, JfrCheckpointWriter* writer) {
+  assert(writer != NULL, "invariant");
+  writer->write(id);
+  writer->write((u1)!reached_root);
+  writer->write(nr_of_frames);
+}
+
+static void write_stack_frame(const JfrStackFrame* frame, JfrCheckpointWriter* writer) {
+  assert(frame != NULL, "invariant");
+  frame->write(*writer);
+}
+
+bool ObjectSampleCheckpoint::tag(const JfrStackTrace* trace, JfrCheckpointWriter* writer /* NULL */) {
+  assert(trace != NULL, "invariant");
+  if (is_stack_trace_processed(trace->id())) {
+    return false;
+  }
+  if (writer != NULL) {
+    // JfrStackTrace
+    write_stack_trace(trace->id(), trace->_reached_root, trace->_nr_of_frames, writer);
+  }
+  traceid last_id = 0;
+  for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
+    if (writer != NULL) {
+      // JfrStackFrame(s)
+      write_stack_frame(&trace->_frames[i], writer);
+    }
+    const traceid method_id = trace->_frames[i]._methodid;
+    if (last_id == method_id || is_processed(method_id) || is_klass_unloaded(get_klass_id(method_id))) {
+      continue;
+    }
+    last_id = method_id;
+    InstanceKlass* const ik = trace->_frames[i]._klass;
+    assert(ik != NULL, "invariant");
+    JfrTraceId::use(ik, lookup_method(ik, get_method_id_num(method_id)));
+  }
+  return true;
+}
+
+static bool stack_trace_precondition(const ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  return sample->has_stack_trace_id() && !sample->is_dead();
+}
+
+class Tagger {
+ private:
+  JfrStackTraceRepository& _stack_trace_repo;
+ public:
+  Tagger(JfrStackTraceRepository& stack_trace_repo) : _stack_trace_repo(stack_trace_repo) {}
+  void sample_do(ObjectSample* sample) {
+    ObjectSampleCheckpoint::tag(sample);
+    if (stack_trace_precondition(sample)) {
+      assert(sample->stack_trace_id() == sample->stack_trace()->id(), "invariant");
+      ObjectSampleCheckpoint::tag(sample->stack_trace(), NULL);
+    }
+  }
+};
+
+static void tag_old_traces(ObjectSample* last_resolved, JfrStackTraceRepository& stack_trace_repo) {
+  assert(last_resolved != NULL, "invariant");
+  assert(stack_trace_id_set != NULL, "invariant");
+  assert(stack_trace_id_set->is_empty(), "invariant");
+  Tagger tagger(stack_trace_repo);
+  do_samples(last_resolved, NULL, tagger);
+}
+
+class StackTraceInstall {
+ private:
+  JfrStackTraceRepository& _stack_trace_repo;
+ public:
+  StackTraceInstall(JfrStackTraceRepository& stack_trace_repo) : _stack_trace_repo(stack_trace_repo) {}
+  void install_to_sample(ObjectSample* sample, const JfrStackTrace* stack_trace);
+  void sample_do(ObjectSample* sample) {
+    ObjectSampleCheckpoint::tag(sample);
+    if (stack_trace_precondition(sample)) {
+      install_to_sample(sample, _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id()));
+    }
+  }
+};
+
+#ifdef ASSERT
+static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* trace) {
+  assert(sample != NULL, "invariant");
+  assert(trace != NULL, "invariant");
+  assert(trace->hash() == sample->stack_trace_hash(), "invariant");
+  assert(trace->id() == sample->stack_trace_id(), "invariant");
+}
+#endif
+
+void StackTraceInstall::install_to_sample(ObjectSample* sample, const JfrStackTrace* stack_trace) {
+  assert(sample != NULL, "invariant");
+  assert(stack_trace != NULL, "invariant");
+  DEBUG_ONLY(validate_stack_trace(sample, stack_trace));
+  JfrStackTrace* const sample_trace = const_cast<JfrStackTrace*>(sample->stack_trace());
+  if (sample_trace != NULL) {
+    *sample_trace = *stack_trace; // copy
+  } else {
+    sample->set_stack_trace(new JfrStackTrace(stack_trace->id(), *stack_trace, NULL)); // new
+  }
+  assert(sample->stack_trace() != NULL, "invariant");
+}
+
+static void install_new_stack_traces(JfrStackTraceRepository& stack_trace_repo) {
+  StackTraceInstall stack_trace_install(stack_trace_repo);
+  iterate_samples(stack_trace_install);
+  stack_trace_id_set->clear();
+}
+
+static void allocate_traceid_working_sets() {
+  const int set_size = JfrOptionSet::old_object_queue_size();
+  stack_trace_id_set = resource_allocate_array<traceid>(set_size);
+  id_set = resource_allocate_array<traceid>(set_size);
+  sort_unloaded_set();
+}
+
+// caller needs ResourceMark
+void ObjectSampleCheckpoint::rotate(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
+  assert(sampler != NULL, "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  if (sampler->last() == NULL) {
+    // nothing to process
+    return;
+  }
+  allocate_traceid_working_sets();
+  install_new_stack_traces(stack_trace_repo);
+  ObjectSample* const last_resolved = const_cast<ObjectSample*>(sampler->last_resolved());
+  if (last_resolved != NULL) {
+    tag_old_traces(last_resolved, stack_trace_repo);
+  }
+}
+
 class RootSystemType : public JfrSerializer {
  public:
   void serialize(JfrCheckpointWriter& writer) {
@@ -74,247 +479,138 @@
   }
 };
 
-class CheckpointInstall {
- private:
-  const JfrCheckpointBlobHandle& _cp;
- public:
-  CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {}
-  void sample_do(ObjectSample* sample) {
-    assert(sample != NULL, "invariant");
-    if (!sample->is_dead()) {
-      sample->set_klass_checkpoint(_cp);
-    }
+static void register_serializers() {
+  static bool is_registered = false;
+  if (!is_registered) {
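+    // Registered once only: the serializer instances appear to be handed over
+    // to the serializer registry for the lifetime of the JVM, so re-registering
+    // on every write would be redundant.
+    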
JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, true, new RootSystemType()); + JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, true, new RootType()); + is_registered = true; + } +} + +static void reset_blob_write_state(const ObjectSample* sample) { + assert(sample != NULL, "invariant"); + if (sample->has_thread_checkpoint()) { + sample->thread_checkpoint()->reset_write_state(); + } + if (sample->has_klass_checkpoint()) { + sample->klass_checkpoint()->reset_write_state(); } -}; +} + +static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) { + if (sample->has_thread_checkpoint() && has_thread_exited(sample->thread_id())) { + sample->thread_checkpoint()->exclusive_write(writer); + } +} + +static void write_klass_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) { + if (sample->has_klass_checkpoint()) { + sample->klass_checkpoint()->exclusive_write(writer); + } +} + +static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer) { + assert(sample != NULL, "invariant"); + write_thread_blob(sample, writer); + write_klass_blob(sample, writer); +} class CheckpointWrite { private: + const ObjectSampler* _sampler; JfrCheckpointWriter& _writer; const jlong _last_sweep; public: - CheckpointWrite(JfrCheckpointWriter& writer, jlong last_sweep) : _writer(writer), _last_sweep(last_sweep) {} + CheckpointWrite(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) : + _sampler(sampler), _writer(writer), _last_sweep(last_sweep) {} void sample_do(ObjectSample* sample) { assert(sample != NULL, "invariant"); if (sample->is_alive_and_older_than(_last_sweep)) { - if (sample->has_thread_checkpoint()) { - const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint(); - thread_cp->exclusive_write(_writer); - } - if (sample->has_klass_checkpoint()) { - const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint(); - klass_cp->exclusive_write(_writer); - } + write_blobs(sample, _writer); } } }; class CheckpointStateReset { private: + const ObjectSampler* _sampler; const jlong _last_sweep; public: - CheckpointStateReset(jlong last_sweep) : _last_sweep(last_sweep) {} + CheckpointStateReset(const ObjectSampler* sampler, jlong last_sweep) : _sampler(sampler), _last_sweep(last_sweep) {} void sample_do(ObjectSample* sample) { assert(sample != NULL, "invariant"); if (sample->is_alive_and_older_than(_last_sweep)) { - if (sample->has_thread_checkpoint()) { - const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint(); - thread_cp->reset_write_state(); - } - if (sample->has_klass_checkpoint()) { - const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint(); - klass_cp->reset_write_state(); - } + reset_blob_write_state(sample); } } }; +static void reset_write_state_for_blobs(const ObjectSampler* sampler, jlong last_sweep) { + CheckpointStateReset state_reset(sampler, last_sweep); + iterate_samples(state_reset, true, false); +} + +static void write_sample_blobs(const ObjectSampler* sampler, jlong last_sweep, Thread* thread) { + JfrCheckpointWriter writer(thread, false); + CheckpointWrite checkpoint_write(sampler, writer, last_sweep); + iterate_samples(checkpoint_write, true, false); + reset_write_state_for_blobs(sampler, last_sweep); +} + class StackTraceWrite { private: JfrStackTraceRepository& _stack_trace_repo; JfrCheckpointWriter& _writer; + const jlong _last_sweep; int _count; public: - StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer) : - 
_stack_trace_repo(stack_trace_repo), _writer(writer), _count(0) { - JfrStacktrace_lock->lock_without_safepoint_check(); - } - ~StackTraceWrite() { - assert(JfrStacktrace_lock->owned_by_self(), "invariant"); - JfrStacktrace_lock->unlock(); - } - + StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer, jlong last_sweep) : + _stack_trace_repo(stack_trace_repo), _writer(writer), _last_sweep(last_sweep), _count(0) {} void sample_do(ObjectSample* sample) { - assert(sample != NULL, "invariant"); - if (!sample->is_dead()) { - if (sample->has_stack_trace()) { - JfrTraceId::use(sample->klass(), true); - _stack_trace_repo.write(_writer, sample->stack_trace_id(), sample->stack_trace_hash()); + ObjectSampleCheckpoint::tag(sample); + if (stack_trace_precondition(sample) && sample->is_alive_and_older_than(_last_sweep)) { + assert(sample->stack_trace_id() == sample->stack_trace()->id(), "invariant"); + if (ObjectSampleCheckpoint::tag(sample->stack_trace(), &_writer)) { ++_count; } } } - - int count() const { - return _count; - } -}; - -class SampleMark { - private: - ObjectSampleMarker& _marker; - jlong _last_sweep; - int _count; - public: - SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), - _last_sweep(last_sweep), - _count(0) {} - void sample_do(ObjectSample* sample) { - assert(sample != NULL, "invariant"); - if (sample->is_alive_and_older_than(_last_sweep)) { - _marker.mark(sample->object()); - ++_count; - } - } - int count() const { return _count; } }; -void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) { - assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant"); - - if (!writer.has_data()) { - if (!class_unload) { - LeakProfiler::resume(); - } - assert(LeakProfiler::is_running(), "invariant"); +static void write_and_tag_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepository& repo, jlong last_sweep, Thread* thread) { + assert(sampler != NULL, "invariant"); + allocate_traceid_working_sets(); + install_new_stack_traces(repo); + JfrCheckpointWriter writer(thread); + const JfrCheckpointContext ctx = writer.context(); + writer.write_type(TYPE_STACKTRACE); + const jlong count_offset = writer.reserve(sizeof(u4)); + StackTraceWrite sw(repo, writer, last_sweep); + do_samples(sampler->last(), NULL, sw); + if (sw.count() == 0) { + writer.set_context(ctx); return; } - - assert(writer.has_data(), "invariant"); - const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob(); - - const ObjectSampler* const object_sampler = LeakProfiler::object_sampler(); - assert(object_sampler != NULL, "invariant"); - - ObjectSample* const last = const_cast(object_sampler->last()); - const ObjectSample* const last_resolved = object_sampler->last_resolved(); - CheckpointInstall install(h_cp); - - if (class_unload) { - if (last != NULL) { - // all samples need the class unload information - do_samples(last, NULL, install); - } - assert(LeakProfiler::is_running(), "invariant"); - return; - } - - // only new samples since last resolved checkpoint - if (last != last_resolved) { - do_samples(last, last_resolved, install); - if (resume) { - const_cast(object_sampler)->set_last_resolved(last); - } - } - assert(LeakProfiler::is_suspended(), "invariant"); - if (resume) { - LeakProfiler::resume(); - assert(LeakProfiler::is_running(), "invariant"); - } + writer.write_count((u4)sw.count(), count_offset); } -void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool 
emit_all, Thread* thread) { +void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) { + assert(sampler != NULL, "invariant"); assert(edge_store != NULL, "invariant"); assert(thread != NULL, "invariant"); - static bool types_registered = false; - if (!types_registered) { - JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, true, new RootSystemType()); - JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, true, new RootType()); - types_registered = true; - } - const ObjectSampler* const object_sampler = LeakProfiler::object_sampler(); - assert(object_sampler != NULL, "invariant"); - const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value(); - ObjectSample* const last = const_cast(object_sampler->last()); - { - JfrCheckpointWriter writer(false, false, thread); - CheckpointWrite checkpoint_write(writer, last_sweep); - do_samples(last, NULL, checkpoint_write); - } - CheckpointStateReset state_reset(last_sweep); - do_samples(last, NULL, state_reset); + register_serializers(); + // sample set is predicated on time of last sweep + const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value(); + write_and_tag_stack_traces(sampler, JfrStackTraceRepository::instance(), last_sweep, thread); + write_sample_blobs(sampler, last_sweep, thread); + // write reference chains if (!edge_store->is_empty()) { - // java object and chain representations - JfrCheckpointWriter writer(false, true, thread); + JfrCheckpointWriter writer(thread); ObjectSampleWriter osw(writer, edge_store); - edge_store->iterate_edges(osw); + edge_store->iterate(osw); } } - -WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) : - _stack_trace_repo(repo) { -} - -bool WriteObjectSampleStacktrace::process() { - assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - if (!LeakProfiler::is_running()) { - return true; - } - // Suspend the LeakProfiler subsystem - // to ensure stable samples even - // after we return from the safepoint. 
-  LeakProfiler::suspend();
-  assert(!LeakProfiler::is_running(), "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
-
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
-
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  const ObjectSample* const last_resolved = object_sampler->last_resolved();
-  if (last == last_resolved) {
-    assert(LeakProfiler::is_suspended(), "invariant");
-    return true;
-  }
-
-  JfrCheckpointWriter writer(false, true, Thread::current());
-  const JfrCheckpointContext ctx = writer.context();
-
-  writer.write_type(TYPE_STACKTRACE);
-  const jlong count_offset = writer.reserve(sizeof(u4));
-
-  int count = 0;
-  {
-    StackTraceWrite stack_trace_write(_stack_trace_repo, writer); // JfrStacktrace_lock
-    do_samples(last, last_resolved, stack_trace_write);
-    count = stack_trace_write.count();
-  }
-  if (count == 0) {
-    writer.set_context(ctx);
-    assert(LeakProfiler::is_suspended(), "invariant");
-    return true;
-  }
-  assert(count > 0, "invariant");
-  writer.write_count((u4)count, count_offset);
-  JfrStackTraceRepository::write_metadata(writer);
-
-  ObjectSampleCheckpoint::install(writer, false, false);
-  assert(LeakProfiler::is_suspended(), "invariant");
-  return true;
-}
-
-int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) {
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  if (last == NULL) {
-    return 0;
-  }
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  SampleMark mark(marker, last_sweep);
-  do_samples(last, NULL, mark);
-  return mark.count();
-}
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp Sat Aug 24 14:30:27 2019 +0200
@@ -26,26 +26,29 @@
 #define SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
 
 #include "memory/allocation.hpp"
-#include "utilities/exceptions.hpp"
 
 class EdgeStore;
-class JfrStackTraceRepository;
+class Klass;
+class JavaThread;
 class JfrCheckpointWriter;
+class JfrStackTrace;
+class JfrStackTraceRepository;
+class ObjectSample;
 class ObjectSampleMarker;
+class ObjectSampler;
+class Thread;
 
 class ObjectSampleCheckpoint : AllStatic {
  public:
-  static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume);
-  static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread);
-  static int mark(ObjectSampleMarker& marker, bool emit_all);
-};
-
-class WriteObjectSampleStacktrace : public StackObj {
- private:
-  JfrStackTraceRepository& _stack_trace_repo;
- public:
-  WriteObjectSampleStacktrace(JfrStackTraceRepository& repo);
-  bool process();
+  static void on_klass_unload(const Klass* k);
+  static void on_type_set_unload(JfrCheckpointWriter& writer);
+  static void on_thread_exit(JavaThread* jt);
+  static void resolve_sampled_objects();
+  static void rotate(const ObjectSampler* sampler, JfrStackTraceRepository& repo);
+  static void tag(const ObjectSample* sample);
+  static bool tag(const JfrStackTrace* trace, JfrCheckpointWriter* writer = NULL);
+  static int save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& 
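marker, bool emit_all);
+  // write() emits the accumulated checkpoints (stack traces and blobs) for
+  // eligible samples and, when reference chains were collected, the chain
+  // representations held by the EdgeStore.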
+  static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,7 @@
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp"
+#include "jfr/writers/jfrTypeWriterHost.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "utilities/growableArray.hpp"
@@ -159,6 +158,11 @@
     return stored->_field_modifiers == query->_field_modifiers;
   }
 
+  void unlink(FieldInfoEntry* entry) {
+    assert(entry != NULL, "invariant");
+    // nothing
+  }
+
  public:
   FieldTable() : _table(new FieldInfoTable(this)) {}
   ~FieldTable() {
@@ -196,7 +200,7 @@
 static FieldTable* field_infos = NULL;
 static RootDescriptionInfo* root_infos = NULL;
 
-int __write_sample_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* si) {
+int __write_sample_info__(JfrCheckpointWriter* writer, const void* si) {
   assert(writer != NULL, "invariant");
   assert(si != NULL, "invariant");
   const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si;
@@ -211,17 +215,17 @@
   return 1;
 }
 
-typedef JfrArtifactWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
-typedef JfrArtifactWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
+typedef JfrTypeWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl;
+typedef JfrTypeWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
 
 static void write_sample_infos(JfrCheckpointWriter& writer) {
   if (sample_infos != NULL) {
-    SampleWriter sw(&writer, NULL, false);
+    SampleWriter sw(&writer);
     sample_infos->iterate(sw);
   }
 }
 
-int __write_reference_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ri) {
+int __write_reference_info__(JfrCheckpointWriter* writer, const void* ri) {
   assert(writer != NULL, "invariant");
   assert(ri != NULL, "invariant");
   const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri;
@@ -233,17 +237,17 @@
   return 1;
 }
 
-typedef JfrArtifactWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
-typedef JfrArtifactWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
+typedef JfrTypeWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl;
+typedef JfrTypeWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
 
 static void write_reference_infos(JfrCheckpointWriter& writer) {
   if (ref_infos != NULL) {
-    ReferenceWriter rw(&writer, NULL, false);
+    ReferenceWriter rw(&writer);
     ref_infos->iterate(rw);
   }
 }
 
-int __write_array_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ai) {
+int __write_array_info__(JfrCheckpointWriter* writer, const void* ai) {
   assert(writer != NULL, "invariant");
   assert(ai != NULL, "invariant");
   const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai;
@@ -270,17 +274,17 @@
   return array_infos->store(osai);
 }
 
-typedef JfrArtifactWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
-typedef JfrArtifactWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
+typedef JfrTypeWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl;
+typedef JfrTypeWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
 
 static void write_array_infos(JfrCheckpointWriter& writer) {
   if (array_infos != NULL) {
-    ArrayWriter aw(&writer, NULL, false);
+    ArrayWriter aw(&writer);
     array_infos->iterate(aw);
   }
 }
 
-int __write_field_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* fi) {
+int __write_field_info__(JfrCheckpointWriter* writer, const void* fi) {
   assert(writer != NULL, "invariant");
   assert(fi != NULL, "invariant");
   const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi;
@@ -314,12 +318,12 @@
   return field_infos->store(osfi);
 }
 
-typedef JfrArtifactWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
-typedef JfrArtifactWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
+typedef JfrTypeWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl;
+typedef JfrTypeWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
 
 static void write_field_infos(JfrCheckpointWriter& writer) {
   if (field_infos != NULL) {
-    FieldWriter fw(&writer, NULL, false);
+    FieldWriter fw(&writer);
     field_infos->iterate(fw);
   }
 }
@@ -339,7 +343,7 @@
   return description.description();
 }
 
-int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* di) {
+int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di) {
   assert(writer != NULL, "invariant");
   assert(di != NULL, "invariant");
   const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di;
@@ -350,7 +354,7 @@
   return 1;
 }
 
-static traceid get_root_description_info_id(const Edge& edge, traceid id) {
+static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
   assert(edge.is_root(), "invariant");
   if (EdgeUtils::is_leak_edge(edge)) {
     return 0;
@@ -366,8 +370,8 @@
   return root_infos->store(oodi);
 }
 
-typedef JfrArtifactWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
-typedef JfrArtifactWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
+typedef JfrTypeWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl;
+typedef JfrTypeWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter;
 
 int _edge_reference_compare_(uintptr_t lhs, uintptr_t rhs) {
@@ -513,12 +517,12 @@
     RootResolutionSet rrs(root_infos);
     RootResolver::resolve(rrs);
     // write roots
-    RootDescriptionWriter rw(&writer, NULL, false);
+    RootDescriptionWriter rw(&writer);
     root_infos->iterate(rw);
   }
 }
 
-static void add_old_object_sample_info(const Edge* current, traceid id) {
+static void add_old_object_sample_info(const StoredEdge* current, traceid id) {
   assert(current != NULL, "invariant");
   if (sample_infos == NULL) {
     sample_infos = new SampleInfo();
@@ -528,11 +532,11 @@
   assert(oosi != NULL, "invariant");
   oosi->_id = id;
   oosi->_data._object = current->pointee();
-  oosi->_data._reference_id = current->is_root() ? (traceid)0 : id;
+  oosi->_data._reference_id = current->parent() == NULL ? (traceid)0 : id;
   sample_infos->store(oosi);
 }
 
-static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) {
+static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) {
   assert(current != NULL, "invariant");
   if (ref_infos == NULL) {
     ref_infos = new RefInfo();
@@ -544,37 +548,43 @@
   ri->_id = id;
   ri->_data._array_info_id = !current->is_skip_edge() ? get_array_info_id(*current, id) : 0;
-  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ?
-                             get_field_info_id(*current) : (traceid)0;
+  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? 
get_field_info_id(*current) : (traceid)0; ri->_data._old_object_sample_id = parent_id; ri->_data._skip = current->skip_length(); ref_infos->store(ri); } -static traceid add_root_info(const Edge* root, traceid id) { - assert(root != NULL, "invariant"); - assert(root->is_root(), "invariant"); - return get_root_description_info_id(*root, id); +static bool is_gc_root(const StoredEdge* current) { + assert(current != NULL, "invariant"); + return current->parent() == NULL && current->gc_root_id() != 0; } -void ObjectSampleWriter::write(const RoutableEdge* edge) { +static traceid add_gc_root_info(const StoredEdge* root, traceid id) { + assert(root != NULL, "invariant"); + assert(is_gc_root(root), "invariant"); + return get_gc_root_description_info_id(*root, id); +} + +void ObjectSampleWriter::write(const StoredEdge* edge) { assert(edge != NULL, "invariant"); const traceid id = _store->get_id(edge); add_old_object_sample_info(edge, id); - const RoutableEdge* parent = edge->logical_parent(); + const StoredEdge* const parent = edge->parent(); if (parent != NULL) { add_reference_info(edge, id, _store->get_id(parent)); } else { - assert(edge->is_root(), "invariant"); - add_root_info(edge, id); + if (is_gc_root(edge)) { + assert(edge->gc_root_id() == id, "invariant"); + add_gc_root_info(edge, id); + } } } -ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) : +ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) : _writer(writer), _store(store) { assert(store != NULL, "invariant"); - assert(store->number_of_entries() > 0, "invariant"); + assert(!store->is_empty(), "invariant"); sample_infos = NULL; ref_infos = NULL; array_infos = NULL; @@ -590,26 +600,7 @@ write_root_descriptors(_writer); } -void ObjectSampleWriter::write_chain(const RoutableEdge& edge) { - assert(EdgeUtils::is_leak_edge(edge), "invariant"); - if (edge.processed()) { - return; - } - EdgeUtils::collapse_chain(edge); - const RoutableEdge* current = &edge; - while (current != NULL) { - if (current->processed()) { - return; - } - write(current); - current->set_processed(); - current = current->logical_parent(); - } -} - -bool ObjectSampleWriter::operator()(const RoutableEdge& edge) { - if (EdgeUtils::is_leak_edge(edge)) { - write_chain(edge); - } +bool ObjectSampleWriter::operator()(StoredEdge& e) { + write(&e); return true; } diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp Sat Aug 24 14:30:27 2019 +0200 @@ -30,21 +30,17 @@ class Edge; class EdgeStore; class JfrCheckpointWriter; -class RoutableEdge; +class StoredEdge; class ObjectSampleWriter : public StackObj { private: JfrCheckpointWriter& _writer; - const EdgeStore* const _store; - - void write(const RoutableEdge* edge); - void write_chain(const RoutableEdge& edge); - + EdgeStore* const _store; + void write(const StoredEdge* edge); public: - ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store); + ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store); ~ObjectSampleWriter(); - - bool operator()(const RoutableEdge& edge); + bool operator()(StoredEdge& edge); }; #endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp --- 
a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -41,9 +41,6 @@ #include "runtime/vframe_hp.hpp" #include "services/management.hpp" #include "utilities/growableArray.hpp" -#if INCLUDE_JVMCI -#include "jvmci/jvmci.hpp" -#endif class ReferenceLocateClosure : public OopClosure { protected: @@ -106,7 +103,6 @@ bool do_management_roots(); bool do_string_table_roots(); bool do_aot_loader_roots(); - JVMCI_ONLY(bool do_jvmci_roots();) bool do_roots(); @@ -132,7 +128,7 @@ bool ReferenceToRootClosure::do_cldg_roots() { assert(!complete(), "invariant"); ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, NULL); - CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_strong); + CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_none); ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure); return rlc.complete(); } @@ -193,15 +189,6 @@ return rcl.complete(); } -#if INCLUDE_JVMCI -bool ReferenceToRootClosure::do_jvmci_roots() { - assert(!complete(), "invariant"); - ReferenceLocateClosure rcl(_callback, OldObjectRoot::_jvmci, OldObjectRoot::_type_undetermined, NULL); - JVMCI::oops_do(&rcl); - return rcl.complete(); -} -#endif - bool ReferenceToRootClosure::do_roots() { assert(!complete(), "invariant"); assert(OldObjectRoot::_system_undetermined == _info._system, "invariant"); @@ -252,13 +239,6 @@ return true; } -#if INCLUDE_JVMCI - if (do_jvmci_roots()) { - _complete = true; - return true; - } -#endif - return false; } @@ -436,9 +416,6 @@ }; void RootResolver::resolve(RootCallback& callback) { - - // Need to clear cld claim bit before starting - ClassLoaderDataGraph::clear_claimed_marks(); RootResolverMarkScope mark_scope; // thread local roots diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.hpp --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.hpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.hpp Sat Aug 24 14:30:27 2019 +0200 @@ -25,8 +25,8 @@ #ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP #define SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP +#include "jfr/leakprofiler/utilities/rootType.hpp" #include "memory/allocation.hpp" -#include "jfr/leakprofiler/utilities/rootType.hpp" #include "oops/oopsHierarchy.hpp" struct RootCallbackInfo { diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/emitEventOperation.cpp --- a/src/hotspot/share/jfr/leakprofiler/emitEventOperation.cpp Fri Aug 23 18:47:55 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,236 +0,0 @@ -/* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ -#include "precompiled.hpp" -#include "gc/shared/collectedHeap.hpp" -#include "jfr/jfrEvents.hpp" -#include "jfr/leakprofiler/utilities/granularTimer.hpp" -#include "jfr/leakprofiler/chains/rootSetClosure.hpp" -#include "jfr/leakprofiler/chains/edge.hpp" -#include "jfr/leakprofiler/chains/edgeQueue.hpp" -#include "jfr/leakprofiler/chains/edgeStore.hpp" -#include "jfr/leakprofiler/chains/bitset.hpp" -#include "jfr/leakprofiler/sampling/objectSample.hpp" -#include "jfr/leakprofiler/leakProfiler.hpp" -#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp" -#include "jfr/leakprofiler/sampling/objectSampler.hpp" -#include "jfr/leakprofiler/emitEventOperation.hpp" -#include "jfr/leakprofiler/chains/bfsClosure.hpp" -#include "jfr/leakprofiler/chains/dfsClosure.hpp" -#include "jfr/leakprofiler/chains/objectSampleMarker.hpp" -#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" -#include "jfr/support/jfrThreadId.hpp" -#include "logging/log.hpp" -#include "memory/resourceArea.hpp" -#include "memory/universe.hpp" -#include "oops/markOop.hpp" -#include "oops/oop.inline.hpp" -#include "runtime/safepoint.hpp" -#include "runtime/vmThread.hpp" -#include "utilities/globalDefinitions.hpp" - -/* The EdgeQueue is backed by directly managed virtual memory. - * We will attempt to dimension an initial reservation - * in proportion to the size of the heap (represented by heap_region). 
- * Initial memory reservation: 5% of the heap OR at least 32 Mb - * Commit ratio: 1 : 10 (subject to allocation granularties) - */ -static size_t edge_queue_memory_reservation(const MemRegion& heap_region) { - const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M); - assert(memory_reservation_bytes >= (size_t)32*M, "invariant"); - return memory_reservation_bytes; -} - -static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) { - const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10; - assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant"); - return memory_commit_block_size_bytes; -} - -static void log_edge_queue_summary(const EdgeQueue& edge_queue) { - log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K); - log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top()); - log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K); - if (edge_queue.reserved_size() > 0) { - log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n", - ((double)edge_queue.live_set() / (double)edge_queue.reserved_size())); - } -} - -void EmitEventOperation::doit() { - assert(LeakProfiler::is_running(), "invariant"); - _object_sampler = LeakProfiler::object_sampler(); - assert(_object_sampler != NULL, "invariant"); - - _vm_thread = VMThread::vm_thread(); - assert(_vm_thread == Thread::current(), "invariant"); - _vm_thread_local = _vm_thread->jfr_thread_local(); - assert(_vm_thread_local != NULL, "invariant"); - assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant"); - - // The VM_Operation::evaluate() which invoked doit() - // contains a top level ResourceMark - - // save the original markWord for the potential leak objects - // to be restored on function exit - ObjectSampleMarker marker; - if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) { - return; - } - - EdgeStore edge_store; - - GranularTimer::start(_cutoff_ticks, 1000000); - if (_cutoff_ticks <= 0) { - // no chains - write_events(&edge_store); - return; - } - - assert(_cutoff_ticks > 0, "invariant"); - - // The bitset used for marking is dimensioned as a function of the heap size - const MemRegion heap_region = Universe::heap()->reserved_region(); - BitSet mark_bits(heap_region); - - // The edge queue is dimensioned as a fraction of the heap size - const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region); - EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size)); - - // The initialize() routines will attempt to reserve and allocate backing storage memory. - // Failure to accommodate will render root chain processing impossible. - // As a fallback on failure, just write out the existing samples, flat, without chains. 
- if (!(mark_bits.initialize() && edge_queue.initialize())) { - log_warning(jfr)("Unable to allocate memory for root chain processing"); - write_events(&edge_store); - return; - } - - // necessary condition for attempting a root set iteration - Universe::heap()->ensure_parsability(false); - - RootSetClosure::add_to_queue(&edge_queue); - if (edge_queue.is_full()) { - // Pathological case where roots don't fit in queue - // Do a depth-first search, but mark roots first - // to avoid walking sideways over roots - DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits); - } else { - BFSClosure bfs(&edge_queue, &edge_store, &mark_bits); - bfs.process(); - } - GranularTimer::stop(); - write_events(&edge_store); - log_edge_queue_summary(edge_queue); -} - -int EmitEventOperation::write_events(EdgeStore* edge_store) { - assert(_object_sampler != NULL, "invariant"); - assert(edge_store != NULL, "invariant"); - assert(_vm_thread != NULL, "invariant"); - assert(_vm_thread_local != NULL, "invariant"); - assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - - // save thread id in preparation for thread local trace data manipulations - const traceid vmthread_id = _vm_thread_local->thread_id(); - assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant"); - - const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value(); - int count = 0; - - const ObjectSample* current = _object_sampler->first(); - while (current != NULL) { - ObjectSample* prev = current->prev(); - if (current->is_alive_and_older_than(last_sweep)) { - write_event(current, edge_store); - ++count; - } - current = prev; - } - - // restore thread local stack trace and thread id - _vm_thread_local->set_thread_id(vmthread_id); - _vm_thread_local->clear_cached_stack_trace(); - assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant"); - - if (count > 0) { - // serialize assoicated checkpoints - ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread); - } - return count; -} - -static int array_size(const oop object) { - assert(object != NULL, "invariant"); - if (object->is_array()) { - return arrayOop(object)->length(); - } - return min_jint; -} - -void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) { - assert(sample != NULL, "invariant"); - assert(!sample->is_dead(), "invariant"); - assert(edge_store != NULL, "invariant"); - assert(_vm_thread_local != NULL, "invariant"); - const oop* object_addr = sample->object_addr(); - assert(*object_addr != NULL, "invariant"); - - const Edge* edge = (const Edge*)(*object_addr)->mark(); - traceid gc_root_id = 0; - if (edge == NULL) { - // In order to dump out a representation of the event - // even though it was not reachable / too long to reach, - // we need to register a top level edge for this object - Edge e(NULL, object_addr); - edge_store->add_chain(&e, 1); - edge = (const Edge*)(*object_addr)->mark(); - } else { - gc_root_id = edge_store->get_root_id(edge); - } - - assert(edge != NULL, "invariant"); - assert(edge->pointee() == *object_addr, "invariant"); - const traceid object_id = edge_store->get_id(edge); - assert(object_id != 0, "invariant"); - - EventOldObjectSample e(UNTIMED); - e.set_starttime(GranularTimer::start_time()); - e.set_endtime(GranularTimer::end_time()); - e.set_allocationTime(sample->allocation_time()); - e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc()); - e.set_object(object_id); - e.set_arrayElements(array_size(*object_addr)); - 
e.set_root(gc_root_id); - - // Temporarily assigning both the stack trace id and thread id - // onto the thread local data structure of the VMThread (for the duration - // of the commit() call). This trick provides a means to override - // the event generation mechanism by injecting externally provided id's. - // Here, in particular, this allows us to emit an old object event - // supplying information from where the actual sampling occurred. - _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id()); - assert(sample->has_thread(), "invariant"); - _vm_thread_local->set_thread_id(sample->thread_id()); - e.commit(); -} diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/emitEventOperation.hpp --- a/src/hotspot/share/jfr/leakprofiler/emitEventOperation.hpp Fri Aug 23 18:47:55 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP -#define SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP - -#include "runtime/vmOperations.hpp" - -class BFSClosure; -class EdgeStore; -class EdgeQueue; -class JfrThreadData; -class ObjectSample; -class ObjectSampler; - -class VMThread; - -// Safepoint operation for emitting object sample events -class EmitEventOperation : public VM_Operation { - private: - jlong _cutoff_ticks; - bool _emit_all; - VMThread* _vm_thread; - JfrThreadLocal* _vm_thread_local; - ObjectSampler* _object_sampler; - - void write_event(const ObjectSample* sample, EdgeStore* edge_store); - int write_events(EdgeStore* edge_store); - - public: - EmitEventOperation(jlong cutoff_ticks, bool emit_all) : - _cutoff_ticks(cutoff_ticks), - _emit_all(emit_all), - _vm_thread(NULL), - _vm_thread_local(NULL), - _object_sampler(NULL) { - } - - VMOp_Type type() const { - return VMOp_GC_HeapInspection; - } - - Mode evaluation_mode() const { - return _safepoint; - } - - virtual void doit(); -}; - -#endif // SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp --- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,25 +23,31 @@ */ #include "precompiled.hpp" -#include "jfr/leakprofiler/emitEventOperation.hpp" #include "jfr/leakprofiler/leakProfiler.hpp" #include "jfr/leakprofiler/startOperation.hpp" #include "jfr/leakprofiler/stopOperation.hpp" +#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp" #include "jfr/leakprofiler/sampling/objectSampler.hpp" #include "jfr/recorder/service/jfrOptionSet.hpp" +#include "logging/log.hpp" #include "memory/iterator.hpp" -#include "oops/oop.hpp" -#include "runtime/atomic.hpp" -#include "runtime/orderAccess.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vmThread.hpp" -#include "utilities/ostream.hpp" + +bool LeakProfiler::is_running() { + return ObjectSampler::is_created(); +} -// Only to be updated during safepoint -ObjectSampler* LeakProfiler::_object_sampler = NULL; +bool LeakProfiler::start(int sample_count) { + if (is_running()) { + return true; + } -static volatile jbyte suspended = 0; -bool LeakProfiler::start(jint sample_count) { + // Allows user to disable leak profiler on command line by setting queue size to zero. + if (sample_count == 0) { + return false; + } + if (UseZGC) { log_warning(jfr)("LeakProfiler is currently not supported in combination with ZGC"); return false; @@ -52,49 +58,56 @@ return false; } - if (_object_sampler != NULL) { - // already started - return true; + assert(!is_running(), "invariant"); + assert(sample_count > 0, "invariant"); + + // schedule the safepoint operation for installing the object sampler + StartOperation op(sample_count); + VMThread::execute(&op); + + if (!is_running()) { + log_trace(jfr, system)("Object sampling could not be started because the sampler could not be allocated"); + return false; } - // Allows user to disable leak profiler on command line by setting queue size to zero. - if (sample_count > 0) { - StartOperation op(sample_count); - VMThread::execute(&op); - return _object_sampler != NULL; - } - return false; + assert(is_running(), "invariant"); + log_trace(jfr, system)("Object sampling started"); + return true; } bool LeakProfiler::stop() { - if (_object_sampler == NULL) { - // already stopped/not started - return true; + if (!is_running()) { + return false; } + + // schedule the safepoint operation for uninstalling and destroying the object sampler StopOperation op; VMThread::execute(&op); - return _object_sampler == NULL; + + assert(!is_running(), "invariant"); + log_trace(jfr, system)("Object sampling stopped"); + return true; } -void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) { +void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) { if (!is_running()) { return; } - EmitEventOperation op(cutoff_ticks, emit_all); - VMThread::execute(&op); + // exclusive access to object sampler instance + ObjectSampler* const sampler = ObjectSampler::acquire(); + assert(sampler != NULL, "invariant"); + EventEmitter::emit(sampler, cutoff_ticks, emit_all); + ObjectSampler::release(); } void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) { assert(SafepointSynchronize::is_at_safepoint(), "Leak Profiler::oops_do(...) 
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp
--- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,36 +28,16 @@
 #include "memory/allocation.hpp"
 
 class BoolObjectClosure;
-class ObjectSampler;
 class OopClosure;
 class JavaThread;
-class Thread;
 
 class LeakProfiler : public AllStatic {
-  friend class ClassUnloadTypeSet;
-  friend class EmitEventOperation;
-  friend class ObjectSampleCheckpoint;
-  friend class StartOperation;
-  friend class StopOperation;
-  friend class TypeSet;
-  friend class WriteObjectSampleStacktrace;
-
- private:
-  static ObjectSampler* _object_sampler;
-
-  static void set_object_sampler(ObjectSampler* object_sampler);
-  static ObjectSampler* object_sampler();
-
-  static void suspend();
-  static void resume();
-  static bool is_suspended();
-
  public:
-  static bool start(jint sample_count);
+  static bool start(int sample_count);
   static bool stop();
-  static void emit_events(jlong cutoff_ticks, bool emit_all);
   static bool is_running();
+  static void emit_events(int64_t cutoff_ticks, bool emit_all);
   static void sample(HeapWord* object, size_t size, JavaThread* thread);
 
   // Called by GC
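After this refactoring the header exposes only a small static facade; all sampler state and friendships moved into ObjectSampler itself. A hypothetical caller drives it like this (256 is an arbitrary sample-queue size for illustration, and cutoff_ticks is assumed to be in scope):

  // Sketch: leak profiler lifecycle from a hypothetical caller.
  if (LeakProfiler::start(256)) {                   // installs sampler at a safepoint
    // ... application runs, old objects get sampled ...
    LeakProfiler::emit_events(cutoff_ticks, false); // emit samples per cutoff
    LeakProfiler::stop();                           // destroys sampler at a safepoint
  }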
"memory/allocation.hpp" #include "oops/oop.hpp" #include "utilities/ticks.hpp" + /* * Handle for diagnosing Java memory leaks. * @@ -39,17 +41,22 @@ * allocated, the thread and the stack trace. */ class ObjectSample : public JfrCHeapObj { + friend class CheckpointInstall; + friend class ObjectResolver; + friend class ObjectSampleCheckpoint; friend class ObjectSampler; friend class SampleList; private: ObjectSample* _next; ObjectSample* _previous; + mutable const JfrStackTrace* _stack_trace; JfrCheckpointBlobHandle _thread_cp; JfrCheckpointBlobHandle _klass_cp; oop _object; Ticks _allocation_time; traceid _stack_trace_id; traceid _thread_id; + mutable traceid _klass_id; int _index; size_t _span; size_t _allocated; @@ -72,20 +79,29 @@ void reset() { set_stack_trace_id(0); - set_stack_trace_hash(0), + set_stack_trace_hash(0); + _klass_id = 0; release_references(); _dead = false; } + ~ObjectSample() { + if (_stack_trace != NULL) { + delete _stack_trace; + } + } + public: ObjectSample() : _next(NULL), _previous(NULL), + _stack_trace(NULL), _thread_cp(), _klass_cp(), _object(NULL), _allocation_time(), _stack_trace_id(0), _thread_id(0), + _klass_id(0), _index(0), _span(0), _allocated(0), @@ -174,7 +190,7 @@ return _heap_used_at_last_gc; } - bool has_stack_trace() const { + bool has_stack_trace_id() const { return stack_trace_id() != 0; } @@ -194,6 +210,14 @@ _stack_trace_hash = hash; } + const JfrStackTrace* stack_trace() const { + return _stack_trace; + } + + void set_stack_trace(const JfrStackTrace* trace) const { + _stack_trace = trace; + } + bool has_thread() const { return _thread_id != 0; } diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp Fri Aug 23 18:47:55 2019 +0200 +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp Sat Aug 24 14:30:27 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,6 +21,7 @@ * questions. 
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,6 +21,7 @@
  * questions.
  *
  */
+
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "jfr/leakprofiler/sampling/objectSample.hpp"
@@ -35,8 +36,18 @@
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 
+static ObjectSampler* _instance = NULL;
+
+static ObjectSampler& instance() {
+  assert(_instance != NULL, "invariant");
+  return *_instance;
+}
+
 ObjectSampler::ObjectSampler(size_t size) :
   _priority_queue(new SamplePriorityQueue(size)),
   _list(new SampleList(size)),
@@ -44,7 +55,6 @@
   _total_allocated(0),
   _threshold(0),
   _size(size),
-  _tryLock(0),
   _dead_samples(false) {}
 
 ObjectSampler::~ObjectSampler() {
@@ -54,32 +64,110 @@
   _list = NULL;
 }
 
-void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
+bool ObjectSampler::create(size_t size) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_instance == NULL, "invariant");
+  _instance = new ObjectSampler(size);
+  return _instance != NULL;
+}
+
+bool ObjectSampler::is_created() {
+  return _instance != NULL;
+}
+
+ObjectSampler* ObjectSampler::sampler() {
+  assert(is_created(), "invariant");
+  return _instance;
+}
+
+void ObjectSampler::destroy() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (_instance != NULL) {
+    ObjectSampler* const sampler = _instance;
+    _instance = NULL;
+    delete sampler;
+  }
+}
+
+static volatile int _lock = 0;
+
+ObjectSampler* ObjectSampler::acquire() {
+  assert(is_created(), "invariant");
+  while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
+  return _instance;
+}
+
+void ObjectSampler::release() {
+  assert(is_created(), "invariant");
+  OrderAccess::fence();
+  _lock = 0;
+}
+
+static traceid get_thread_id(JavaThread* thread) {
   assert(thread != NULL, "invariant");
-  const traceid thread_id = thread->threadObj() != NULL ? thread->jfr_thread_local()->thread_id() : 0;
+  if (thread->threadObj() == NULL) {
+    return 0;
+  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (!tl->has_thread_checkpoint()) {
+    JfrCheckpointManager::create_thread_checkpoint(thread);
+  }
+  assert(tl->has_thread_checkpoint(), "invariant");
+  return tl->thread_id();
+}
+
+// Populates the thread local stack frames, but does not add them
+// to the stacktrace repository (...yet, see stacktrace_id() below)
+//
+void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
+    JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0);
+  }
+}
+
+// We were successful in acquiring the try lock and have been selected for adding a sample.
+// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
+//
+traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
+  thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
+  return stacktrace_id;
+}
+
+void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  assert(is_created(), "invariant");
+
+  const traceid thread_id = get_thread_id(thread);
   if (thread_id == 0) {
     return;
   }
-  assert(thread_id != 0, "invariant");
-
-  if (!thread->jfr_thread_local()->has_thread_checkpoint()) {
-    JfrCheckpointManager::create_thread_checkpoint(thread);
-    assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
-  }
-  traceid stack_trace_id = 0;
-  unsigned int stack_trace_hash = 0;
-  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
-    stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash);
-    thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash);
-  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
+  fill_stacktrace(&stacktrace, thread);
 
-  JfrTryLock tryLock(&_tryLock);
+  // try enter critical section
+  JfrTryLock tryLock(&_lock);
   if (!tryLock.has_lock()) {
     log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
     return;
   }
+
+  instance().add(obj, allocated, thread_id, &stacktrace, thread);
+}
+
+void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread_id != 0, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+
   if (_dead_samples) {
     scavenge();
     assert(!_dead_samples, "invariant");
@@ -101,13 +189,13 @@
   }
 
   assert(sample != NULL, "invariant");
-  assert(thread_id != 0, "invariant");
   sample->set_thread_id(thread_id);
   sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
 
-  if (stack_trace_id != 0) {
-    sample->set_stack_trace_id(stack_trace_id);
-    sample->set_stack_trace_hash(stack_trace_hash);
+  const unsigned int stacktrace_hash = stacktrace->hash();
+  if (stacktrace_hash != 0) {
+    sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
+    sample->set_stack_trace_hash(stacktrace_hash);
   }
 
   sample->set_span(allocated);
@@ -118,38 +206,16 @@
   _priority_queue->push(sample);
 }
 
-const ObjectSample* ObjectSampler::last() const {
-  return _list->last();
-}
-
-const ObjectSample* ObjectSampler::first() const {
-  return _list->first();
-}
-
-const ObjectSample* ObjectSampler::last_resolved() const {
-  return _list->last_resolved();
-}
-
-void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
-  _list->set_last_resolved(sample);
-}
-
-void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+void ObjectSampler::scavenge() {
   ObjectSample* current = _list->last();
   while (current != NULL) {
     ObjectSample* next = current->next();
-    if (!current->is_dead()) {
-      if (is_alive->do_object_b(current->object())) {
-        // The weakly referenced object is alive, update pointer
-        f->do_oop(const_cast<oop*>(current->object_addr()));
-      } else {
-        current->set_dead();
-        _dead_samples = true;
-      }
+    if (current->is_dead()) {
+      remove_dead(current);
     }
     current = next;
   }
-  _last_sweep = JfrTicks::now();
+  _dead_samples = false;
 }
 
 void ObjectSampler::remove_dead(ObjectSample* sample) {
@@ -166,16 +232,41 @@
   _list->release(sample);
 }
 
-void ObjectSampler::scavenge() {
-  ObjectSample* current = _list->last();
+void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(is_created(), "invariant");
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  ObjectSampler& sampler = instance();
+  ObjectSample* current = sampler._list->last();
   while (current != NULL) {
     ObjectSample* next = current->next();
-    if (current->is_dead()) {
-      remove_dead(current);
+    if (!current->is_dead()) {
+      if (is_alive->do_object_b(current->object())) {
+        // The weakly referenced object is alive, update pointer
+        f->do_oop(const_cast<oop*>(current->object_addr()));
+      } else {
+        current->set_dead();
+        sampler._dead_samples = true;
+      }
     }
     current = next;
   }
-  _dead_samples = false;
+  sampler._last_sweep = JfrTicks::now();
+}
+
+ObjectSample* ObjectSampler::last() const {
+  return _list->last();
+}
+
+const ObjectSample* ObjectSampler::first() const {
+  return _list->first();
+}
+
+const ObjectSample* ObjectSampler::last_resolved() const {
+  return _list->last_resolved();
+}
+
+void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
+  _list->set_last_resolved(sample);
 }
 
 int ObjectSampler::item_count() const {
@@ -189,7 +280,7 @@
 ObjectSample* ObjectSampler::item_at(int index) {
   return const_cast<ObjectSample*>(
     const_cast<const ObjectSampler*>(this)->item_at(index)
-  );
+         );
 }
 
 const JfrTicks& ObjectSampler::last_sweep() const {
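The sampling fast path is now split into two phases: the stack frames are captured outside the lock (fill_stacktrace), and only the winner of the try lock installs them into the repository and touches shared state (add). A sampler that loses the race drops its sample instead of blocking in allocation-sensitive code. The shape of the critical section, condensed from the code above:

  // Sketch: capture outside the lock, install inside it, skip on contention.
  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
  fill_stacktrace(&stacktrace, thread);       // no lock held yet
  JfrTryLock tryLock(&_lock);                 // non-blocking attempt
  if (!tryLock.has_lock()) {
    return;                                   // contended: drop this sample
  }
  instance().add(obj, allocated, thread_id, &stacktrace, thread);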
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -28,23 +28,23 @@
 #include "memory/allocation.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 
+typedef u8 traceid;
+
 class BoolObjectClosure;
+class JavaThread;
+class JfrStackTrace;
 class OopClosure;
 class ObjectSample;
-class ObjectSampler;
 class SampleList;
 class SamplePriorityQueue;
-class Thread;
 
 // Class reponsible for holding samples and
 // making sure the samples are evenly distributed as
 // new entries are added and removed.
 class ObjectSampler : public CHeapObj<mtTracing> {
   friend class LeakProfiler;
-  friend class ObjectSampleCheckpoint;
   friend class StartOperation;
   friend class StopOperation;
-  friend class EmitEventOperation;
  private:
   SamplePriorityQueue* _priority_queue;
   SampleList* _list;
@@ -52,25 +52,41 @@
   size_t _total_allocated;
   size_t _threshold;
   size_t _size;
-  volatile int _tryLock;
   bool _dead_samples;
 
+  // Lifecycle
   explicit ObjectSampler(size_t size);
   ~ObjectSampler();
+  static bool create(size_t size);
+  static bool is_created();
+  static void destroy();
 
-  void add(HeapWord* object, size_t size, JavaThread* thread);
+  // Stacktrace
+  static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
+  traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
+
+  // Sampling
+  static void sample(HeapWord* object, size_t size, JavaThread* thread);
+  void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
+  void scavenge();
   void remove_dead(ObjectSample* sample);
-  void scavenge();
 
   // Called by GC
-  void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+  static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 
- public:
   const ObjectSample* item_at(int index) const;
   ObjectSample* item_at(int index);
   int item_count() const;
+
+ public:
+  static ObjectSampler* sampler();
+
+  // For operations that require exclusive access (non-safepoint)
+  static ObjectSampler* acquire();
+  static void release();
+
   const ObjectSample* first() const;
-  const ObjectSample* last() const;
+  ObjectSample* last() const;
   const ObjectSample* last_resolved() const;
   void set_last_resolved(const ObjectSample* sample);
   const JfrTicks& last_sweep() const;
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/startOperation.hpp
--- a/src/hotspot/share/jfr/leakprofiler/startOperation.hpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/startOperation.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -25,35 +25,18 @@
 #ifndef SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
 #define SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
 
-#include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "logging/log.hpp"
-#include "runtime/vmOperations.hpp"
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-// Safepoint operation for starting leak profiler object sampler
-class StartOperation : public VM_Operation {
+// Safepoint operation for creating and starting the leak profiler object sampler
+class StartOperation : public OldObjectVMOperation {
  private:
-  jlong _sample_count;
+  int _sample_count;
  public:
-  StartOperation(jlong sample_count) :
-    _sample_count(sample_count) {
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
+  StartOperation(int sample_count) : _sample_count(sample_count) {}
 
   virtual void doit() {
-    assert(!LeakProfiler::is_running(), "invariant");
-    jint queue_size = JfrOptionSet::old_object_queue_size();
-    LeakProfiler::set_object_sampler(new ObjectSampler(queue_size));
-    log_trace(jfr, system)( "Object sampling started");
+    ObjectSampler::create(_sample_count);
   }
 };
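acquire() spins on a cmpxchg and release() publishes with a fence, but the pairing is left to callers. If exception-safe bracketing were wanted, a small scoped guard (purely hypothetical, not part of this changeset) could wrap the pair:

  // Hypothetical RAII guard over ObjectSampler::acquire()/release().
  class ScopedObjectSampler {
    ObjectSampler* _sampler;
   public:
    ScopedObjectSampler() : _sampler(ObjectSampler::acquire()) {}
    ~ScopedObjectSampler() { ObjectSampler::release(); }
    ObjectSampler* operator->() const { return _sampler; }
  };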
14:30:27 2019 +0200
@@ -25,31 +25,14 @@
 #ifndef SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
 #define SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
 
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "logging/log.hpp"
-#include "runtime/vmOperations.hpp"
-
-// Safepoint operation for stopping leak profiler object sampler
-class StopOperation : public VM_Operation {
- public:
-  StopOperation() {}
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
+// Safepoint operation for stopping and destroying the leak profiler object sampler
+class StopOperation : public OldObjectVMOperation {
+ public:
   virtual void doit() {
-    assert(LeakProfiler::is_running(), "invariant");
-    ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-    delete object_sampler;
-    LeakProfiler::set_object_sampler(NULL);
-    log_trace(jfr, system)( "Object sampling stopped");
+    ObjectSampler::destroy();
   }
 };
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/leakprofiler/utilities/vmOperation.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/vmOperation.hpp	Sat Aug 24 14:30:27 2019 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+
+#include "runtime/vmOperations.hpp"
+
+class OldObjectVMOperation : public VM_Operation {
+ public:
+  Mode evaluation_mode() const {
+    return _safepoint;
+  }
+
+  VMOp_Type type() const {
+    return VMOp_JFROldObject;
+  }
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
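With evaluation_mode() and type() hoisted into OldObjectVMOperation, a new leak-profiler safepoint operation only has to supply doit(). A hypothetical subclass would look like this (to reach the private lifecycle API it would also need to be befriended by ObjectSampler, as StartOperation and StopOperation are):

  // Hypothetical subclass: only doit() remains to be defined.
  class ResetOperation : public OldObjectVMOperation {
    int _sample_count;
   public:
    ResetOperation(int sample_count) : _sample_count(sample_count) {}
    virtual void doit() {
      ObjectSampler::destroy();             // runs at a safepoint
      ObjectSampler::create(_sample_count); // reinstall with a new capacity
    }
  };
  // usage: ResetOperation op(256); VMThread::execute(&op);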
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/metadata/metadata.xml
--- a/src/hotspot/share/jfr/metadata/metadata.xml	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/metadata/metadata.xml	Sat Aug 24 14:30:27 2019 +0200
@@ -1019,6 +1019,27 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -1198,6 +1219,10 @@
+
+
+
+
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp
--- a/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -198,7 +198,7 @@
   }
 
   if (write_type) {
-    JfrCheckpointWriter writer(false, true, Thread::current());
+    JfrCheckpointWriter writer;
     write_interface_types(writer);
   }
   static bool is_serializer_registered = false;
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/periodic/jfrOSInterface.cpp
--- a/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -67,9 +67,14 @@
  private:
   CPUInformationInterface* _cpu_info_interface;
   CPUPerformanceInterface* _cpu_perf_interface;
-  SystemProcessInterface* _system_process_interface;
+  SystemProcessInterface*  _system_process_interface;
   NetworkPerformanceInterface* _network_performance_interface;
 
+  CPUInformationInterface* cpu_info_interface();
+  CPUPerformanceInterface* cpu_perf_interface();
+  SystemProcessInterface* system_process_interface();
+  NetworkPerformanceInterface* network_performance_interface();
+
   JfrOSInterfaceImpl();
   bool initialize();
   ~JfrOSInterfaceImpl();
@@ -90,28 +95,57 @@
   // system processes information
   int system_processes(SystemProcess** system_processes, int* no_of_sys_processes);
 
-  int network_utilization(NetworkInterface** network_interfaces) const;
+  int network_utilization(NetworkInterface** network_interfaces);
 };
 
 JfrOSInterface::JfrOSInterfaceImpl::JfrOSInterfaceImpl() : _cpu_info_interface(NULL),
                                                            _cpu_perf_interface(NULL),
-                                                           _system_process_interface(NULL) {}
+                                                           _system_process_interface(NULL),
+                                                           _network_performance_interface(NULL) {}
+
+template <typename T>
+static T* create_interface() {
+  ResourceMark rm;
+  T* iface = new T();
+  if (iface != NULL) {
+    if (!iface->initialize()) {
+      delete iface;
+      iface = NULL;
+    }
+  }
+  return iface;
+}
+
+CPUInformationInterface* JfrOSInterface::JfrOSInterfaceImpl::cpu_info_interface() {
+  if (_cpu_info_interface == NULL) {
+    _cpu_info_interface = create_interface<CPUInformationInterface>();
+  }
+  return _cpu_info_interface;
+}
+
+CPUPerformanceInterface* JfrOSInterface::JfrOSInterfaceImpl::cpu_perf_interface() {
+  if (_cpu_perf_interface == NULL) {
+    _cpu_perf_interface = create_interface<CPUPerformanceInterface>();
+  }
+  return _cpu_perf_interface;
+}
+
+SystemProcessInterface* JfrOSInterface::JfrOSInterfaceImpl::system_process_interface() {
+  if (_system_process_interface == NULL) {
+    _system_process_interface = create_interface<SystemProcessInterface>();
+  }
+  return _system_process_interface;
+}
+
+NetworkPerformanceInterface* JfrOSInterface::JfrOSInterfaceImpl::network_performance_interface() {
+  if (_network_performance_interface == NULL) {
+    _network_performance_interface = create_interface<NetworkPerformanceInterface>();
+  }
+  return _network_performance_interface;
+}
 
 bool JfrOSInterface::JfrOSInterfaceImpl::initialize() {
-  _cpu_info_interface = new CPUInformationInterface();
-  if (!(_cpu_info_interface != NULL && _cpu_info_interface->initialize())) {
-    return false;
-  }
-  _cpu_perf_interface = new CPUPerformanceInterface();
-  if (!(_cpu_perf_interface != NULL && _cpu_perf_interface->initialize())) {
-    return false;
-  }
-  _system_process_interface = new SystemProcessInterface();
-  if (!(_system_process_interface != NULL && _system_process_interface->initialize())) {
-    return false;
-  }
-  _network_performance_interface = new NetworkPerformanceInterface();
-  return _network_performance_interface != NULL && _network_performance_interface->initialize();
+  return true;
 }
 
 JfrOSInterface::JfrOSInterfaceImpl::~JfrOSInterfaceImpl(void) {
@@ -133,36 +167,43 @@
   }
 }
 
+int JfrOSInterface::JfrOSInterfaceImpl::cpu_information(CPUInformation& cpu_info) {
+  CPUInformationInterface* const iface = cpu_info_interface();
+  return iface == NULL ? OS_ERR : iface->cpu_information(cpu_info);
+}
+
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_load(int which_logical_cpu, double* cpu_load) {
-  return _cpu_perf_interface->cpu_load(which_logical_cpu, cpu_load);
+  CPUPerformanceInterface* const iface = cpu_perf_interface();
+  return iface == NULL ? OS_ERR : iface->cpu_load(which_logical_cpu, cpu_load);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::context_switch_rate(double* rate) {
-  return _cpu_perf_interface->context_switch_rate(rate);
+  CPUPerformanceInterface* const iface = cpu_perf_interface();
+  return iface == NULL ? OS_ERR : iface->context_switch_rate(rate);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_load_total_process(double* cpu_load) {
-  return _cpu_perf_interface->cpu_load_total_process(cpu_load);
+  CPUPerformanceInterface* const iface = cpu_perf_interface();
+  return iface == NULL ? OS_ERR : iface->cpu_load_total_process(cpu_load);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotal) {
-  return _cpu_perf_interface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
-}
-
-int JfrOSInterface::JfrOSInterfaceImpl::cpu_information(CPUInformation& cpu_info) {
-  return _cpu_info_interface->cpu_information(cpu_info);
+  CPUPerformanceInterface* const iface = cpu_perf_interface();
+  return iface == NULL ? OS_ERR : iface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) {
   assert(system_processes != NULL, "system_processes pointer is NULL!");
   assert(no_of_sys_processes != NULL, "no_of_sys_processes pointer is NULL!");
-  return _system_process_interface->system_processes(system_processes, no_of_sys_processes);
+  SystemProcessInterface* const iface = system_process_interface();
+  return iface == NULL ? OS_ERR : iface->system_processes(system_processes, no_of_sys_processes);
 }
 
-int JfrOSInterface::JfrOSInterfaceImpl::network_utilization(NetworkInterface** network_interfaces) const {
-  return _network_performance_interface->network_utilization(network_interfaces);
+int JfrOSInterface::JfrOSInterfaceImpl::network_utilization(NetworkInterface** network_interfaces) {
+  NetworkPerformanceInterface* const iface = network_performance_interface();
+  return iface == NULL ? OS_ERR : iface->network_utilization(network_interfaces);
 }
 
 // assigned char* is RESOURCE_HEAP_ALLOCATED
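Each accessor now constructs its backing interface on first use through create_interface<T>(), so initialize() no longer pays up front for interfaces that no periodic event ever requests, and a failed two-phase construction leaves the slot NULL so a later call simply retries. A plain-C++ model of the pattern (T is assumed to expose bool initialize(); names are illustrative, not part of the patch):

  // Model of the lazy two-phase-init accessors above.
  template <typename T>
  static T* lazy_create(T*& slot) {
    if (slot == NULL) {
      T* instance = new T();
      if (instance != NULL && !instance->initialize()) {
        delete instance;  // construction succeeded but initialization failed
        instance = NULL;
      }
      slot = instance;    // stays NULL on failure; the next call retries
    }
    return slot;
  }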
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
--- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -65,7 +65,9 @@
 #include "services/threadService.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
-
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahJfrSupport.hpp"
+#endif
 /**
  *  JfrPeriodic class
  *  Implementation of declarations in
@@ -629,3 +631,14 @@
   event.set_flushingEnabled(UseCodeCacheFlushing);
   event.commit();
 }
+
+
+TRACE_REQUEST_FUNC(ShenandoahHeapRegionInformation) {
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC) {
+    VM_ShenandoahSendHeapRegionInfoEvents op;
+    VMThread::execute(&op);
+  }
+#endif
+}
+
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp
--- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -462,8 +462,8 @@
       last_native_ms = last_java_ms;
     }
     _sample.signal();
-    jlong java_interval = _interval_java == 0 ? max_jlong : MAX2(_interval_java, 10);
-    jlong native_interval = _interval_native == 0 ? max_jlong : MAX2(_interval_native, 10);
+    jlong java_interval = _interval_java == 0 ? max_jlong : MAX2(_interval_java, 1);
+    jlong native_interval = _interval_native == 0 ? max_jlong : MAX2(_interval_native, 1);
 
     jlong now_ms = get_monotonic_ms();
diff -r 84ef29ccac56 -r 00860d9caf4d src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Fri Aug 23 18:47:55 2019 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp	Sat Aug 24 14:30:27 2019 +0200
@@ -91,22 +91,18 @@
 static const size_t checkpoint_buffer_cache_count = 2;
 static const size_t checkpoint_buffer_size = 512 * K;
 
-static JfrCheckpointMspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrCheckpointManager* system) {
-  JfrCheckpointMspace* mspace = new JfrCheckpointMspace(buffer_size, limit, cache_count, system);
-  if (mspace != NULL) {
-    mspace->initialize();
-  }
-  return mspace;
+static JfrCheckpointMspace* allocate_mspace(size_t size, size_t limit, size_t cache_count, JfrCheckpointManager* mgr) {
+  return create_mspace(size, limit, cache_count, mgr);
 }
 
 bool JfrCheckpointManager::initialize() {
   assert(_free_list_mspace == NULL, "invariant");
-  _free_list_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
+  _free_list_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
   if (_free_list_mspace == NULL) {
     return false;
   }
   assert(_epoch_transition_mspace == NULL, "invariant");
-  _epoch_transition_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
+  _epoch_transition_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
   if (_epoch_transition_mspace == NULL) {
     return false;
   }
@@ -118,22 +114,6 @@
   return JfrTypeManager::initialize();
 }
 
-bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
-  return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
-}
-
-void JfrCheckpointManager::synchronize_epoch() {
-  assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant");
-  OrderAccess::storestore();
-  _checkpoint_epoch_state = JfrTraceIdEpoch::epoch();
-}
-
-void JfrCheckpointManager::shift_epoch() {
-  debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
-  JfrTraceIdEpoch::shift_epoch();
-  assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
-}
-
 void JfrCheckpointManager::register_service_thread(const Thread* thread) {
   _service_thread = thread;
 }
@@ -155,7 +135,6 @@
 }
 
 #ifdef ASSERT
-
 bool JfrCheckpointManager::is_locked() const {
   return _lock->owned_by_self();
 }
@@ -171,7 +150,6 @@
   assert(buffer->lease(), "invariant");
   assert(buffer->acquired_by_self(), "invariant");
 }
-
 #endif // ASSERT
 
 static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread) {
@@ -189,6 +167,10 @@
   return buffer;
 }
 
+bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
+  return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
+}
+
 static const size_t lease_retry = 10;
 
 BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) {
@@ -252,41 +234,37 @@
   return read_data(data + duration_offset);
 }
 
-static bool is_flushpoint(const u1* data) {
-  return read_data(data + flushpoint_offset) == (juint)1;
-}
-
 static juint number_of_types(const u1* data) {
   return read_data(data + types_offset);
 }
 
-static void write_checkpoint_header(JfrChunkWriter& cw, intptr_t offset_prev_cp_event, const u1* data) {
+static void write_checkpoint_header(JfrChunkWriter& cw, int64_t offset_prev_cp_event, const u1* data) {
   cw.reserve(sizeof(u4));
-  cw.write((u8)EVENT_CHECKPOINT);
-  cw.write(starttime(data));
-  cw.write(duration(data));
-  cw.write((jlong)offset_prev_cp_event);
-  cw.write(is_flushpoint(data));
-  cw.write(number_of_types(data));
+  cw.write(EVENT_CHECKPOINT);
+  cw.write(starttime(data));
+  cw.write(duration(data));
+  cw.write(offset_prev_cp_event);
+  cw.write(false); // not a flushpoint
+  cw.write(number_of_types(data));
 }
 
 static void write_checkpoint_content(JfrChunkWriter& cw, const u1* data, size_t size) {
   assert(data != NULL, "invariant");
-  cw.write_unbuffered(data + payload_offset, size);
+  cw.write_unbuffered(data + payload_offset, size - sizeof(JfrCheckpointEntry));
}
 
 static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) {
   assert(data != NULL, "invariant");
+  const int64_t event_begin = cw.current_offset();
   const int64_t last_checkpoint_event = cw.last_checkpoint_offset();
-  const int64_t event_begin = cw.current_offset();
-  const int64_t offset_to_last_checkpoint_event = 0 == last_checkpoint_event ? 0 : last_checkpoint_event - event_begin;
-  const int64_t total_checkpoint_size = total_size(data);
-  write_checkpoint_header(cw, offset_to_last_checkpoint_event, data);
-  write_checkpoint_content(cw, data, total_checkpoint_size - sizeof(JfrCheckpointEntry));
-  const int64_t checkpoint_event_size = cw.current_offset() - event_begin;
-  cw.write_padded_at_offset(checkpoint_event_size, event_begin);
+  const int64_t delta = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin;
+  const int64_t checkpoint_size = total_size(data);
+  write_checkpoint_header(cw, delta, data);
+  write_checkpoint_content(cw, data, checkpoint_size);
+  const int64_t event_size = cw.current_offset() - event_begin;
+  cw.write_padded_at_offset(event_size, event_begin);
   cw.set_last_checkpoint_offset(event_begin);
-  return (size_t)total_checkpoint_size;
+  return (size_t)checkpoint_size;
 }
 
 static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) {
@@ -294,14 +272,14 @@
   assert(data != NULL, "invariant");
   assert(size > 0, "invariant");
   const u1* const limit = data + size;
-  const u1* next_entry = data;
+  const u1* next = data;
   size_t processed = 0;
-  while (next_entry < limit) {
-    const size_t checkpoint_size = write_checkpoint_event(cw, next_entry);
+  while (next < limit) {
+    const size_t checkpoint_size = write_checkpoint_event(cw, next);
     processed += checkpoint_size;
-    next_entry += checkpoint_size;
+    next += checkpoint_size;
   }
-  assert(next_entry == limit, "invariant");
+  assert(next == limit, "invariant");
   return processed;
 }
 
@@ -321,57 +299,30 @@
 };
 
 typedef CheckpointWriteOp WriteOperation;
-typedef MutexedWriteOp MutexedWriteOperation;
 typedef ReleaseOp CheckpointReleaseOperation;
-typedef CompositeOperation CheckpointWriteOperation;
 
-static size_t write_mspace_exclusive(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwriter) {
-  Thread* const thread = Thread::current();
+template