/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeUtils.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/handles.inline.hpp"

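// An edge is the "leak edge" for its pointee if the pointee's mark word
// holds a back pointer to that edge (installed elsewhere during chain
// construction), so the check reduces to a pointer comparison.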
bool EdgeUtils::is_leak_edge(const Edge& edge) {
  return (const Edge*)edge.pointee()->mark() == &edge;
}

bool EdgeUtils::is_root(const Edge& edge) {
  return edge.is_root();
}

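// Computes the byte offset of the reference from the start of its owning
// instance, i.e. the offset of the instance field holding the reference.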
static int field_offset(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  const oop ref_owner = edge.reference_owner();
  assert(ref_owner != NULL, "invariant");
  const oop* reference = UnifiedOop::decode(edge.reference());
  assert(reference != NULL, "invariant");
  assert(!UnifiedOop::is_narrow(reference), "invariant");
  assert(!ref_owner->is_array(), "invariant");
  assert(ref_owner->is_instance(), "invariant");
  const int offset = (int)pointer_delta(reference, ref_owner, sizeof(char));
  assert(offset < (ref_owner->size() * HeapWordSize), "invariant");
  return offset;
}

static const InstanceKlass* field_type(const Edge& edge) {
  assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
  return (const InstanceKlass*)edge.reference_owner_klass();
}

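// Resolves the name of the field holding the reference by matching the
// field offset against the declared fields of the owner klass, walking up
// the superclass chain if no match is found locally.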
const Symbol* EdgeUtils::field_name_symbol(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  assert(!is_array_element(edge), "invariant");
  const int offset = field_offset(edge);
  const InstanceKlass* ik = field_type(edge);
  while (ik != NULL) {
    JavaFieldStream jfs(ik);
    while (!jfs.done()) {
      if (offset == jfs.offset()) {
        return jfs.name();
      }
      jfs.next();
    }
    ik = (InstanceKlass*)ik->super();
  }
  return NULL;
}

jshort EdgeUtils::field_modifiers(const Edge& edge) {
  const int offset = field_offset(edge);
  const InstanceKlass* ik = field_type(edge);

  while (ik != NULL) {
    JavaFieldStream jfs(ik);
    while (!jfs.done()) {
      if (offset == jfs.offset()) {
        return jfs.access_flags().as_short();
      }
      jfs.next();
    }
    ik = (InstanceKlass*)ik->super();
  }
  return 0;
}

bool EdgeUtils::is_array_element(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  const oop ref_owner = edge.reference_owner();
  assert(ref_owner != NULL, "invariant");
  return ref_owner->is_objArray();
}

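// Computes the element index of the reference within its owning object
// array as the pointer delta from the array base, scaled by the heap oop size.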
static int array_offset(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  const oop ref_owner = edge.reference_owner();
  assert(ref_owner != NULL, "invariant");
  const oop* reference = UnifiedOop::decode(edge.reference());
  assert(reference != NULL, "invariant");
  assert(!UnifiedOop::is_narrow(reference), "invariant");
  assert(ref_owner->is_array(), "invariant");
  const objArrayOop ref_owner_array = static_cast<const objArrayOop>(ref_owner);
  const int offset = (int)pointer_delta(reference, ref_owner_array->base(), heapOopSize);
  assert(offset >= 0 && offset < ref_owner_array->length(), "invariant");
  return offset;
}

int EdgeUtils::array_index(const Edge& edge) {
  return is_array_element(edge) ? array_offset(edge) : 0;
}

int EdgeUtils::array_size(const Edge& edge) {
  if (is_array_element(edge)) {
    const oop ref_owner = edge.reference_owner();
    assert(ref_owner != NULL, "invariant");
    assert(ref_owner->is_objArray(), "invariant");
    return ((objArrayOop)(ref_owner))->length();
  }
  return 0;
}

const Edge* EdgeUtils::root(const Edge& edge) {
  const Edge* current = &edge;
  const Edge* parent = current->parent();
  while (parent != NULL) {
    current = parent;
    parent = current->parent();
  }
  return current;
}

// The number of references associated with the leak node;
// can be viewed as the leak node "context".
// Used to provide leak context for a "capped/skipped" reference chain.
static const size_t leak_context = 100;

// The number of references associated with the root node;
// can be viewed as the root node "context".
// Used to provide root context for a "capped/skipped" reference chain.
static const size_t root_context = 100;

// A limit on the reference chain depth to be serialized.
static const size_t max_ref_chain_depth = leak_context + root_context;

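// Walks at most skip_length steps up the chain of physical parents,
// returning the edge reached (or the last edge before the root if the
// chain is shorter than skip_length).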
const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
  const RoutableEdge* current = &edge;
  const RoutableEdge* parent = current->physical_parent();
  size_t seek = 0;
  while (parent != NULL && seek != skip_length) {
    seek++;
    current = parent;
    parent = parent->physical_parent();
  }
  return current;
}

#ifdef ASSERT
static void validate_skip_target(const RoutableEdge* skip_target) {
  assert(skip_target != NULL, "invariant");
  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
  assert(skip_target->is_sentinel(), "invariant");
}

static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
  assert(new_skip_edge != NULL, "invariant");
  assert(new_skip_edge->is_skip_edge(), "invariant");
  if (last_skip_edge != NULL) {
    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
    validate_skip_target(target->logical_parent());
    return;
  }
  assert(last_skip_edge == NULL, "invariant");
  // only one level of logical indirection
  validate_skip_target(new_skip_edge->logical_parent());
}
#endif // ASSERT

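// Converts new_skip_edge into a skip edge by locating the skip target
// skip_target_distance steps up the physical chain and recording it as
// the edge's logical parent.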
static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
  assert(new_skip_edge != NULL, "invariant");
  assert(!new_skip_edge->is_skip_edge(), "invariant");
  assert(!new_skip_edge->processed(), "invariant");
  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
  assert(skip_target != NULL, "invariant");
  new_skip_edge->set_skip_edge(skip_target);
  new_skip_edge->set_skip_length(skip_target_distance);
  assert(new_skip_edge->is_skip_edge(), "invariant");
  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
}

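// Walks up the physical chain looking for a skip edge whose skip target
// is the sentinel; distance reports the number of steps taken. Returns
// NULL if no such edge exists before the root.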
static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
  assert(distance == 0, "invariant");
  const RoutableEdge* current = &edge;
  while (current != NULL) {
    if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
      return current;
    }
    current = current->physical_parent();
    ++distance;
  }
  return current;
}

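// Collapses a chain that overlaps an already serialized chain: if the
// complete chain cannot be accommodated, a new skip edge is installed on
// the unprocessed edge just below the first processed edge, routing past
// the already written interior directly toward the root context.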
static void collapse_overlapping_chain(const RoutableEdge& edge,
                                       const RoutableEdge* first_processed_edge,
                                       size_t first_processed_distance) {
  assert(first_processed_edge != NULL, "invariant");
  // first_processed_edge is already processed / written
  assert(first_processed_edge->processed(), "invariant");
  assert(first_processed_distance + 1 <= leak_context, "invariant");

  // from this first processed edge, attempt to fetch the last skip edge
  size_t last_skip_edge_distance = 0;
  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;

  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
    // complete chain can be accommodated without modification
    return;
  }

  // backtrack one edge from the existing processed edge
  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
  assert(new_skip_edge != NULL, "invariant");
  assert(!new_skip_edge->processed(), "invariant");
  assert(new_skip_edge->parent() == first_processed_edge, "invariant");

  size_t adjustment = 0;
  if (last_skip_edge != NULL) {
    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
    adjustment = leak_context - first_processed_distance - 1;
    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
  } else {
    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
  }

  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
}

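// Collapses a chain that does not overlap any already serialized chain:
// if the complete chain cannot be accommodated, the edge at the leak
// context boundary becomes a skip edge routed to a target at root context
// distance, which is then marked as the sentinel.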
static void collapse_non_overlapping_chain(const RoutableEdge& edge,
                                           const RoutableEdge* first_processed_edge,
                                           size_t first_processed_distance) {
  assert(first_processed_edge != NULL, "invariant");
  assert(!first_processed_edge->processed(), "invariant");
  // this implies that the first "processed" edge is the leak context relative "leaf"
  assert(first_processed_distance + 1 == leak_context, "invariant");

  const size_t distance_to_root = edge.distance_to_root();
  if (distance_to_root + 1 <= max_ref_chain_depth) {
    // complete chain can be accommodated without constructing a skip edge
    return;
  }

  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel

  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
}

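// Searches for an already processed edge within the leak context,
// returning either the first processed edge found or the edge at the leak
// context boundary; distance reports the number of steps taken.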
static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
  assert(distance == 0, "invariant");
  const RoutableEdge* current = &edge;
  while (current != NULL && distance < leak_context - 1) {
    if (current->processed()) {
      return current;
    }
    current = current->physical_parent();
    ++distance;
  }
  assert(distance <= leak_context - 1, "invariant");
  return current;
}

/*
 * Some vocabulary:
 * -----------
 * "Context" is an interval in the chain; it is associated with an edge and it signifies a number of connected edges.
 * "Processed / written" means an edge that has already been serialized.
 * "Skip edge" is an edge that contains additional information for logical routing purposes.
 * "Skip target" is an edge used as a destination for a skip edge.
 */
void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
  assert(is_leak_edge(edge), "invariant");

  // attempt to locate an already processed edge inside the current leak context (if any)
  size_t first_processed_distance = 0;
  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
  if (first_processed_edge == NULL) {
    return;
  }

  if (first_processed_edge->processed()) {
    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
  } else {
    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
  }

  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
}