author       mgerdin
date         Thu, 23 Feb 2012 14:58:35 +0100
changeset    12095:cc3d6f08a4c4
parent       8921:14bfe81f2a9d
child        13195:be27e1b6a4b9
permissions  -rw-r--r--
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "code/vtableStubs.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "oops/symbol.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/task.hpp"
#include "runtime/vframe.hpp"
#include "utilities/macros.hpp"

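// Overview (added note): this file implements the VM's sampling "flat"
// profiler.  A FlatProfilerTask runs as a PeriodicTask on the WatcherThread;
// on each tick it attributes the sample to GC, deoptimization, other VM
// operations, or to the Java threads' current frames, which each
// ThreadProfiler records in a per-thread hash table of ProfilerNode entries
// and later prints as a flat (non-hierarchical) profile.  The profiler is
// normally driven by the -Xprof launcher option, whose wiring lives outside
// this file.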
// Static fields of FlatProfiler
int FlatProfiler::received_gc_ticks = 0;
int FlatProfiler::vm_operation_ticks = 0;
int FlatProfiler::threads_lock_ticks = 0;
int FlatProfiler::class_loader_ticks = 0;
int FlatProfiler::extra_ticks = 0;
int FlatProfiler::blocked_ticks = 0;
int FlatProfiler::deopt_ticks = 0;
int FlatProfiler::unknown_ticks = 0;
int FlatProfiler::interpreter_ticks = 0;
int FlatProfiler::compiler_ticks = 0;
int FlatProfiler::received_ticks = 0;
int FlatProfiler::delivered_ticks = 0;
int* FlatProfiler::bytecode_ticks = NULL;
int* FlatProfiler::bytecode_ticks_stub = NULL;
int FlatProfiler::all_int_ticks = 0;
int FlatProfiler::all_comp_ticks = 0;
int FlatProfiler::all_ticks = 0;
bool FlatProfiler::full_profile_flag = false;
ThreadProfiler* FlatProfiler::thread_profiler = NULL;
ThreadProfiler* FlatProfiler::vm_thread_profiler = NULL;
FlatProfilerTask* FlatProfiler::task = NULL;
elapsedTimer FlatProfiler::timer;
int FlatProfiler::interval_ticks_previous = 0;
IntervalData* FlatProfiler::interval_data = NULL;

ThreadProfiler::ThreadProfiler() {
  // Space for the ProfilerNodes
  const int area_size = 1 * ProfilerNodeSize * 1024;
  area_bottom = AllocateHeap(area_size, "fprofiler");
  area_top = area_bottom;
  area_limit = area_bottom + area_size;

  // ProfilerNode pointer table
  table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size);
  initialize();
  engaged = false;
}

ThreadProfiler::~ThreadProfiler() {
  FreeHeap(area_bottom);
  area_bottom = NULL;
  area_top = NULL;
  area_limit = NULL;
  FreeHeap(table);
  table = NULL;
}

// Statics for ThreadProfiler
int ThreadProfiler::table_size = 1024;

int ThreadProfiler::entry(int value) {
  value = (value > 0) ? value : -value;
  return value % table_size;
}

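// Note (added comment): entry() folds a method hash into one of table_size
// buckets; e.g. with table_size == 1024 a hash of 2500 lands in bucket
// 2500 % 1024 == 452.  Collisions are handled by chaining ProfilerNodes
// through set_next()/next(), as the *_update() methods below do.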
ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) {
  _r = r;
  _pp = NULL;
  assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds");
  Thread* tp = Thread::current();
  if (tp != NULL && tp->is_Java_thread()) {
    JavaThread* jtp = (JavaThread*) tp;
    ThreadProfiler* pp = jtp->get_thread_profiler();
    _pp = pp;
    if (pp != NULL) {
      pp->region_flag[r] = true;
    }
  }
}

ThreadProfilerMark::~ThreadProfilerMark() {
  if (_pp != NULL) {
    _pp->region_flag[_r] = false;
  }
  _pp = NULL;
}

// Random other statics
static const int col1 = 2;   // position of output column 1
static const int col2 = 11;  // position of output column 2
static const int col3 = 25;  // position of output column 3
static const int col4 = 55;  // position of output column 4


// Used for detailed profiling of nmethods.
class PCRecorder : AllStatic {
 private:
  static int* counters;
  static address base;
  enum {
    bucket_size = 16
  };
  static int index_for(address pc) { return (pc - base)/bucket_size; }
  static address pc_for(int index) { return base + (index * bucket_size); }
  static int size() {
    return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord;
  }
 public:
  static address bucket_start_for(address pc) {
    if (counters == NULL) return NULL;
    return pc_for(index_for(pc));
  }
  static int bucket_count_for(address pc) { return counters[index_for(pc)]; }
  static void init();
  static void record(address pc);
  static void print();
  static void print_blobs(CodeBlob* cb);
};

int* PCRecorder::counters = NULL;
address PCRecorder::base = NULL;

void PCRecorder::init() {
  MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int s = size();
  counters = NEW_C_HEAP_ARRAY(int, s);
  for (int index = 0; index < s; index++) {
    counters[index] = 0;
  }
  base = CodeCache::first_address();
}

void PCRecorder::record(address pc) {
  if (counters == NULL) return;
  assert(CodeCache::contains(pc), "must be in CodeCache");
  counters[index_for(pc)]++;
}


|
175 |
return PCRecorder::bucket_start_for(pc); |
|
176 |
} |
|
177 |
||
178 |
int FlatProfiler::bucket_count_for(address pc) { |
|
179 |
return PCRecorder::bucket_count_for(pc); |
|
180 |
} |
|
181 |
||
void PCRecorder::print() {
  if (counters == NULL) return;

  tty->cr();
  tty->print_cr("Printing compiled methods with PC buckets having more than %d ticks", ProfilerPCTickThreshold);
  tty->print_cr("===================================================================");
  tty->cr();

  GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20);


  int s;
  {
    MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    s = size();
  }

  for (int index = 0; index < s; index++) {
    int count = counters[index];
    if (count > ProfilerPCTickThreshold) {
      address pc = pc_for(index);
      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
      if (cb != NULL && candidates->find(cb) < 0) {
        candidates->push(cb);
      }
    }
  }
  for (int i = 0; i < candidates->length(); i++) {
    print_blobs(candidates->at(i));
  }
}

void PCRecorder::print_blobs(CodeBlob* cb) {
  if (cb != NULL) {
    cb->print();
    if (cb->is_nmethod()) {
      ((nmethod*)cb)->print_code();
    }
    tty->cr();
  } else {
    tty->print_cr("stub code");
  }
}

class tick_counter {  // holds tick info for one node
 public:
  int ticks_in_code;
  int ticks_in_native;

  tick_counter() { ticks_in_code = ticks_in_native = 0; }
  tick_counter(int code, int native) { ticks_in_code = code; ticks_in_native = native; }

  int total() const {
    return (ticks_in_code + ticks_in_native);
  }

  void add(tick_counter* a) {
    ticks_in_code += a->ticks_in_code;
    ticks_in_native += a->ticks_in_native;
  }

  void update(TickPosition where) {
    switch(where) {
      case tp_code:   ticks_in_code++;   break;
      case tp_native: ticks_in_native++; break;
    }
  }

  void print_code(outputStream* st, int total_ticks) {
    st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code);
  }

  void print_native(outputStream* st) {
    st->print(" + %5d ", ticks_in_native);
  }
};

class ProfilerNode {
 private:
  ProfilerNode* _next;
 public:
  tick_counter ticks;

 public:

  void* operator new(size_t size, ThreadProfiler* tp);
  void operator delete(void* p);

  ProfilerNode() {
    _next = NULL;
  }

  virtual ~ProfilerNode() {
    if (_next)
      delete _next;
  }

  void set_next(ProfilerNode* n) { _next = n; }
  ProfilerNode* next() { return _next; }

  void update(TickPosition where) { ticks.update(where);}
  int total_ticks() { return ticks.total(); }

  virtual bool is_interpreted() const { return false; }
  virtual bool is_compiled() const { return false; }
  virtual bool is_stub() const { return false; }
  virtual bool is_runtime_stub() const{ return false; }
  virtual void oops_do(OopClosure* f) = 0;

  virtual bool interpreted_match(methodOop m) const { return false; }
  virtual bool compiled_match(methodOop m ) const { return false; }
  virtual bool stub_match(methodOop m, const char* name) const { return false; }
  virtual bool adapter_match() const { return false; }
  virtual bool runtimeStub_match(const CodeBlob* stub, const char* name) const { return false; }
  virtual bool unknown_compiled_match(const CodeBlob* cb) const { return false; }

  static void print_title(outputStream* st) {
    st->print(" + native");
    st->fill_to(col3);
    st->print("Method");
    st->fill_to(col4);
    st->cr();
  }

  static void print_total(outputStream* st, tick_counter* t, int total, const char* msg) {
    t->print_code(st, total);
    st->fill_to(col2);
    t->print_native(st);
    st->fill_to(col3);
    st->print(msg);
    st->cr();
  }

  virtual methodOop method() = 0;

  virtual void print_method_on(outputStream* st) {
    int limit;
    int i;
    methodOop m = method();
    Symbol* k = m->klass_name();
    // Print the class name with dots instead of slashes
    limit = k->utf8_length();
    for (i = 0 ; i < limit ; i += 1) {
      char c = (char) k->byte_at(i);
      if (c == '/') {
        c = '.';
      }
      st->print("%c", c);
    }
    if (limit > 0) {
      st->print(".");
    }
    Symbol* n = m->name();
    limit = n->utf8_length();
    for (i = 0 ; i < limit ; i += 1) {
      char c = (char) n->byte_at(i);
      st->print("%c", c);
    }
    if( Verbose ) {
      // Disambiguate overloaded methods
      Symbol* sig = m->signature();
      sig->print_symbol_on(st);
    }
  }

  virtual void print(outputStream* st, int total_ticks) {
    ticks.print_code(st, total_ticks);
    st->fill_to(col2);
    ticks.print_native(st);
    st->fill_to(col3);
    print_method_on(st);
    st->cr();
  }

  // for hashing into the table
  static int hash(methodOop method) {
    // The point here is to try to make something fairly unique
    // out of the fields we can read without grabbing any locks
    // since the method may be locked when we need the hash.
    return (
        method->code_size() ^
        method->max_stack() ^
        method->max_locals() ^
        method->size_of_parameters());
  }

  // for sorting
  static int compare(ProfilerNode** a, ProfilerNode** b) {
    return (*b)->total_ticks() - (*a)->total_ticks();
  }
};

void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp){
  void* result = (void*) tp->area_top;
  tp->area_top += size;

  if (tp->area_top > tp->area_limit) {
    fatal("flat profiler buffer overflow");
  }
  return result;
}

void ProfilerNode::operator delete(void* p){
}

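// Note (added comment): ProfilerNode uses a bump-pointer placement new that
// carves nodes out of the ThreadProfiler's pre-allocated area (see the
// ThreadProfiler constructor); operator delete is intentionally a no-op
// because the whole area is released at once when the ThreadProfiler is
// destroyed.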
class interpretedNode : public ProfilerNode {
 private:
  methodOop _method;
 public:
  interpretedNode(methodOop method, TickPosition where) : ProfilerNode() {
    _method = method;
    update(where);
  }

  bool is_interpreted() const { return true; }

  bool interpreted_match(methodOop m) const {
    return _method == m;
  }

  void oops_do(OopClosure* f) {
    f->do_oop((oop*)&_method);
  }

  methodOop method() { return _method; }

  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Interpreted");
    ProfilerNode::print_title(st);
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    ProfilerNode::print_method_on(st);
    if (Verbose) method()->invocation_counter()->print_short();
  }
};

class compiledNode : public ProfilerNode {
 private:
  methodOop _method;
 public:
  compiledNode(methodOop method, TickPosition where) : ProfilerNode() {
    _method = method;
    update(where);
  }
  bool is_compiled() const { return true; }

  bool compiled_match(methodOop m) const {
    return _method == m;
  }

  methodOop method() { return _method; }

  void oops_do(OopClosure* f) {
    f->do_oop((oop*)&_method);
  }

  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Compiled");
    ProfilerNode::print_title(st);
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    ProfilerNode::print_method_on(st);
  }
};

class stubNode : public ProfilerNode {
 private:
  methodOop _method;
  const char* _symbol;  // The name of the nearest VM symbol (for +ProfileVM). Points to a unique string
 public:
  stubNode(methodOop method, const char* name, TickPosition where) : ProfilerNode() {
    _method = method;
    _symbol = name;
    update(where);
  }

  bool is_stub() const { return true; }

  bool stub_match(methodOop m, const char* name) const {
    return (_method == m) && (_symbol == name);
  }

  void oops_do(OopClosure* f) {
    f->do_oop((oop*)&_method);
  }

  methodOop method() { return _method; }

  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Stub");
    ProfilerNode::print_title(st);
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    ProfilerNode::print_method_on(st);
    print_symbol_on(st);
  }

  void print_symbol_on(outputStream* st) {
    if(_symbol) {
      st->print(" (%s)", _symbol);
    }
  }
};

class adapterNode : public ProfilerNode {
 public:
  adapterNode(TickPosition where) : ProfilerNode() {
    update(where);
  }
  bool is_compiled() const { return true; }

  bool adapter_match() const { return true; }

  methodOop method() { return NULL; }

  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    st->print("%s", "adapters");
  }
};

class runtimeStubNode : public ProfilerNode {
 private:
  const CodeBlob* _stub;
  const char* _symbol;  // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string.
 public:
  runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(stub), _symbol(name) {
    assert(stub->is_runtime_stub(), "wrong code blob");
    update(where);
  }

  bool is_runtime_stub() const { return true; }

  bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
    assert(stub->is_runtime_stub(), "wrong code blob");
    return ((RuntimeStub*)_stub)->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
           (_symbol == name);
  }

  methodOop method() { return NULL; }

  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Runtime stub");
    ProfilerNode::print_title(st);
  }

  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    st->print("%s", ((RuntimeStub*)_stub)->name());
    print_symbol_on(st);
  }

  void print_symbol_on(outputStream* st) {
    if(_symbol) {
      st->print(" (%s)", _symbol);
    }
  }
};


class unknown_compiledNode : public ProfilerNode {
  const char *_name;
 public:
  unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
    if ( cb->is_buffer_blob() )
      _name = ((BufferBlob*)cb)->name();
    else
      _name = ((SingletonBlob*)cb)->name();
    update(where);
  }
  bool is_compiled() const { return true; }

  bool unknown_compiled_match(const CodeBlob* cb) const {
    if ( cb->is_buffer_blob() )
      return !strcmp(((BufferBlob*)cb)->name(), _name);
    else
      return !strcmp(((SingletonBlob*)cb)->name(), _name);
  }

  methodOop method() { return NULL; }

  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    st->print("%s", _name);
  }
};

class vmNode : public ProfilerNode {
 private:
  const char* _name; // "optional" name obtained by os means such as dll lookup
 public:
  vmNode(const TickPosition where) : ProfilerNode() {
    _name = NULL;
    update(where);
  }

  vmNode(const char* name, const TickPosition where) : ProfilerNode() {
    _name = name;
    update(where);
  }

  const char *name() const { return _name; }
  bool is_compiled() const { return true; }

  bool vm_match(const char* name) const { return strcmp(name, _name) == 0; }

  methodOop method() { return NULL; }

  static int hash(const char* name){
    // Compute a simple hash
    const char* cp = name;
    int h = 0;

    if(name != NULL){
      while(*cp != '\0'){
        h = (h << 1) ^ *cp;
        cp++;
      }
    }
    return h;
  }

  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    if(_name==NULL){
      st->print("%s", "unknown code");
    }
    else {
      st->print("%s", _name);
    }
  }
};

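// Note (added comment): the ProfilerNode subclasses above cover the places a
// sampled pc can be attributed to: interpretedNode and compiledNode for Java
// methods, stubNode for native-method stubs, adapterNode for adapter code,
// runtimeStubNode for VM runtime stubs, unknown_compiledNode for other code
// blobs, and vmNode for VM-internal (non-generated) code when ProfileVM is on.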
void ThreadProfiler::interpreted_update(methodOop method, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  if (!table[index]) {
    table[index] = new (this) interpretedNode(method, where);
  } else {
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->interpreted_match(method)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) interpretedNode(method, where));
  }
}

void ThreadProfiler::compiled_update(methodOop method, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  if (!table[index]) {
    table[index] = new (this) compiledNode(method, where);
  } else {
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->compiled_match(method)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) compiledNode(method, where));
  }
}

void ThreadProfiler::stub_update(methodOop method, const char* name, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  if (!table[index]) {
    table[index] = new (this) stubNode(method, name, where);
  } else {
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->stub_match(method, name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) stubNode(method, name, where));
  }
}

void ThreadProfiler::adapter_update(TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) adapterNode(where);
  } else {
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->adapter_match()) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) adapterNode(where));
  }
}

void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) runtimeStubNode(stub, name, where);
  } else {
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->runtimeStub_match(stub, name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) runtimeStubNode(stub, name, where));
  }
}


void ThreadProfiler::unknown_compiled_update(const CodeBlob* cb, TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) unknown_compiledNode(cb, where);
  } else {
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->unknown_compiled_match(cb)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) unknown_compiledNode(cb, where));
  }
}

void ThreadProfiler::vm_update(TickPosition where) {
  vm_update(NULL, where);
}

void ThreadProfiler::vm_update(const char* name, TickPosition where) {
  int index = entry(vmNode::hash(name));
  assert(index >= 0, "Must be positive");
  // Note that we call strdup below since the symbol may be resource allocated
  if (!table[index]) {
    table[index] = new (this) vmNode(os::strdup(name), where);
  } else {
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (((vmNode *)node)->vm_match(name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) vmNode(os::strdup(name), where));
  }
}


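// Note (added comment): all of the *_update() methods above share one
// pattern: pick a bucket (a method-hash bucket, or bucket 0 for the node
// kinds that have no method), scan the chain for a node matching this
// sample, bump its tick_counter if found, and otherwise append a new node
// allocated from this ThreadProfiler's arena via the placement new above.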
class FlatProfilerTask : public PeriodicTask {
public:
  FlatProfilerTask(int interval_time) : PeriodicTask(interval_time) {}
  void task();
};

void FlatProfiler::record_vm_operation() {
  if (Universe::heap()->is_gc_active()) {
    FlatProfiler::received_gc_ticks += 1;
    return;
  }

  if (DeoptimizationMarker::is_active()) {
    FlatProfiler::deopt_ticks += 1;
    return;
  }

  FlatProfiler::vm_operation_ticks += 1;
}

void FlatProfiler::record_vm_tick() {
  // Profile the VM Thread itself if needed
  // This is done without getting the Threads_lock and we can go deep
  // inside Safepoint, etc.
  if( ProfileVM ) {
    ResourceMark rm;
    ExtendedPC epc;
    const char *name = NULL;
    char buf[256];
    buf[0] = '\0';

    vm_thread_profiler->inc_thread_ticks();

    // Get a snapshot of a current VMThread pc (and leave it running!)
    // The call may fail if, for instance the VM thread is interrupted while
    // holding the Interrupt_lock or for other reasons.
    epc = os::get_thread_pc(VMThread::vm_thread());
    if(epc.pc() != NULL) {
      if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {
        name = buf;
      }
    }
    if (name != NULL) {
      vm_thread_profiler->vm_update(name, tp_native);
    }
  }
}

void FlatProfiler::record_thread_ticks() {

  int maxthreads, suspendedthreadcount;
  JavaThread** threadsList;
  bool interval_expired = false;

  if (ProfileIntervals &&
      (FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) {
    interval_expired = true;
    interval_ticks_previous = FlatProfiler::received_ticks;
  }

  // Try not to wait for the Threads_lock
  if (Threads_lock->try_lock()) {
    { // Threads_lock scope
      maxthreads = Threads::number_of_threads();
      threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads);
      suspendedthreadcount = 0;
      for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
        if (tp->is_Compiler_thread()) {
          // Only record ticks for active compiler threads
          CompilerThread* cthread = (CompilerThread*)tp;
          if (cthread->task() != NULL) {
            // The compiler is active.  If we need to access any of the fields
            // of the compiler task we should suspend the CompilerThread first.
            FlatProfiler::compiler_ticks += 1;
            continue;
          }
        }

        // First externally suspend all threads by marking each for
        // external suspension - so it will stop at its next transition
        // Then do a safepoint
        ThreadProfiler* pp = tp->get_thread_profiler();
        if (pp != NULL && pp->engaged) {
          MutexLockerEx ml(tp->SR_lock(), Mutex::_no_safepoint_check_flag);
          if (!tp->is_external_suspend() && !tp->is_exiting()) {
            tp->set_external_suspend();
            threadsList[suspendedthreadcount++] = tp;
          }
        }
      }
      Threads_lock->unlock();
    }
    // Suspend each thread. This call should just return
    // for any threads that have already self-suspended
    // Net result should be one safepoint
    for (int j = 0; j < suspendedthreadcount; j++) {
      JavaThread *tp = threadsList[j];
      if (tp) {
        tp->java_suspend();
      }
    }

    // We are responsible for resuming any thread on this list
    for (int i = 0; i < suspendedthreadcount; i++) {
      JavaThread *tp = threadsList[i];
      if (tp) {
        ThreadProfiler* pp = tp->get_thread_profiler();
        if (pp != NULL && pp->engaged) {
          HandleMark hm;
          FlatProfiler::delivered_ticks += 1;
          if (interval_expired) {
            FlatProfiler::interval_record_thread(pp);
          }
          // This is the place where we check to see if a user thread is
          // blocked waiting for compilation.
          if (tp->blocked_on_compilation()) {
            pp->compiler_ticks += 1;
            pp->interval_data_ref()->inc_compiling();
          } else {
            pp->record_tick(tp);
          }
        }
        MutexLocker ml(Threads_lock);
        tp->java_resume();
      }
    }
    if (interval_expired) {
      FlatProfiler::interval_print();
      FlatProfiler::interval_reset();
    }
  } else {
    // Couldn't get the threads lock, just record that rather than blocking
    FlatProfiler::threads_lock_ticks += 1;
  }

}

void FlatProfilerTask::task() {
  FlatProfiler::received_ticks += 1;

  if (ProfileVM) {
    FlatProfiler::record_vm_tick();
  }

  VM_Operation* op = VMThread::vm_operation();
  if (op != NULL) {
    FlatProfiler::record_vm_operation();
    if (SafepointSynchronize::is_at_safepoint()) {
      return;
    }
  }
  FlatProfiler::record_thread_ticks();
}

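// Note (added comment): FlatProfilerTask::task() is the heart of the
// sampler.  It runs on the WatcherThread at the PeriodicTask interval chosen
// in FlatProfiler::engage(); each invocation counts one received tick and
// attributes it either to the VM thread (when ProfileVM is on), to GC,
// deoptimization, or other VM operations, or, via record_thread_ticks(), to
// the Java threads, which are briefly externally suspended so their top
// frames can be sampled.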
void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) {
  FlatProfiler::all_int_ticks++;
  if (!FlatProfiler::full_profile()) {
    return;
  }

  if (!fr.is_interpreted_frame_valid(thread)) {
    // tick came at a bad time
    interpreter_ticks += 1;
    FlatProfiler::interpreter_ticks += 1;
    return;
  }

  // The frame has been fully validated so we can trust the method and bci

  methodOop method = *fr.interpreter_frame_method_addr();

  interpreted_update(method, where);

  // update byte code table
  InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc());
  if (desc != NULL && desc->bytecode() >= 0) {
    ticks[desc->bytecode()]++;
  }
}

void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosition where) {
  const char *name = NULL;
  TickPosition localwhere = where;

  FlatProfiler::all_comp_ticks++;
  if (!FlatProfiler::full_profile()) return;

  CodeBlob* cb = fr.cb();

  // For runtime stubs, record as native rather than as compiled
  if (cb->is_runtime_stub()) {
    RegisterMap map(thread, false);
    fr = fr.sender(&map);
    cb = fr.cb();
    localwhere = tp_native;
  }
  methodOop method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() :
                                          (methodOop)NULL;

  if (method == NULL) {
    if (cb->is_runtime_stub())
      runtime_stub_update(cb, name, localwhere);
    else
      unknown_compiled_update(cb, localwhere);
  }
  else {
    if (method->is_native()) {
      stub_update(method, name, localwhere);
    } else {
      compiled_update(method, localwhere);
    }
  }
}

extern "C" void find(int x);


void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) {
  // The tick happened in real code -> non VM code
  if (fr.is_interpreted_frame()) {
    interval_data_ref()->inc_interpreted();
    record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks);
    return;
  }

  if (CodeCache::contains(fr.pc())) {
    interval_data_ref()->inc_compiled();
    PCRecorder::record(fr.pc());
    record_compiled_tick(thread, fr, tp_code);
    return;
  }

  if (VtableStubs::stub_containing(fr.pc()) != NULL) {
    unknown_ticks_array[ut_vtable_stubs] += 1;
    return;
  }

  frame caller = fr.profile_find_Java_sender_frame(thread);

  if (caller.sp() != NULL && caller.pc() != NULL) {
    record_tick_for_calling_frame(thread, caller);
    return;
  }

  unknown_ticks_array[ut_running_frame] += 1;
  FlatProfiler::unknown_ticks += 1;
}

void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) {
  // The tick happened in VM code
  interval_data_ref()->inc_native();
  if (fr.is_interpreted_frame()) {
    record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub);
    return;
  }
  if (CodeCache::contains(fr.pc())) {
    record_compiled_tick(thread, fr, tp_native);
    return;
  }

  frame caller = fr.profile_find_Java_sender_frame(thread);

  if (caller.sp() != NULL && caller.pc() != NULL) {
    record_tick_for_calling_frame(thread, caller);
    return;
  }

  unknown_ticks_array[ut_calling_frame] += 1;
  FlatProfiler::unknown_ticks += 1;
}

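// Note (added comment): the two record_tick_for_*_frame() routines implement
// the attribution policy.  A "running" frame means the sample landed directly
// in Java code and counts as tp_code; a "calling" frame means the sample
// landed in VM or native code, so the tick is charged as tp_native to the
// nearest Java frame found via profile_find_Java_sender_frame().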
void ThreadProfiler::record_tick(JavaThread* thread) {
  FlatProfiler::all_ticks++;
  thread_ticks += 1;

  // Here's another way to track global state changes.
  // When the class loader starts it marks the ThreadProfiler to tell it it is in the class loader
  // and we check that here.
  // This is more direct, and more than one thread can be in the class loader at a time,
  // but it does mean the class loader has to know about the profiler.
  if (region_flag[ThreadProfilerMark::classLoaderRegion]) {
    class_loader_ticks += 1;
    FlatProfiler::class_loader_ticks += 1;
    return;
  } else if (region_flag[ThreadProfilerMark::extraRegion]) {
    extra_ticks += 1;
    FlatProfiler::extra_ticks += 1;
    return;
  }
  // Note that the WatcherThread can now stop for safepoints
  uint32_t debug_bits = 0;
  if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount,
      SuspendRetryDelay, &debug_bits)) {
    unknown_ticks_array[ut_unknown_thread_state] += 1;
    FlatProfiler::unknown_ticks += 1;
    return;
  }

  frame fr;

  switch (thread->thread_state()) {
  case _thread_in_native:
  case _thread_in_native_trans:
  case _thread_in_vm:
  case _thread_in_vm_trans:
    if (thread->profile_last_Java_frame(&fr)) {
      if (fr.is_runtime_frame()) {
        RegisterMap map(thread, false);
        fr = fr.sender(&map);
      }
      record_tick_for_calling_frame(thread, fr);
    } else {
      unknown_ticks_array[ut_no_last_Java_frame] += 1;
      FlatProfiler::unknown_ticks += 1;
    }
    break;
  // handle_special_runtime_exit_condition self-suspends threads in Java
  case _thread_in_Java:
  case _thread_in_Java_trans:
    if (thread->profile_last_Java_frame(&fr)) {
      if (fr.is_safepoint_blob_frame()) {
        RegisterMap map(thread, false);
        fr = fr.sender(&map);
      }
      record_tick_for_running_frame(thread, fr);
    } else {
      unknown_ticks_array[ut_no_last_Java_frame] += 1;
      FlatProfiler::unknown_ticks += 1;
    }
    break;
  case _thread_blocked:
  case _thread_blocked_trans:
    if (thread->osthread() && thread->osthread()->get_state() == RUNNABLE) {
      if (thread->profile_last_Java_frame(&fr)) {
        if (fr.is_safepoint_blob_frame()) {
          RegisterMap map(thread, false);
          fr = fr.sender(&map);
          record_tick_for_running_frame(thread, fr);
        } else {
          record_tick_for_calling_frame(thread, fr);
        }
      } else {
        unknown_ticks_array[ut_no_last_Java_frame] += 1;
        FlatProfiler::unknown_ticks += 1;
      }
    } else {
      blocked_ticks += 1;
      FlatProfiler::blocked_ticks += 1;
    }
    break;
  case _thread_uninitialized:
  case _thread_new:
  // not used, included for completeness
  case _thread_new_trans:
    unknown_ticks_array[ut_no_last_Java_frame] += 1;
    FlatProfiler::unknown_ticks += 1;
    break;
  default:
    unknown_ticks_array[ut_unknown_thread_state] += 1;
    FlatProfiler::unknown_ticks += 1;
    break;
  }
  return;
}

void ThreadProfiler::engage() {
  engaged = true;
  timer.start();
}

void ThreadProfiler::disengage() {
  engaged = false;
  timer.stop();
}

void ThreadProfiler::initialize() {
  for (int index = 0; index < table_size; index++) {
    table[index] = NULL;
  }
  thread_ticks = 0;
  blocked_ticks = 0;
  compiler_ticks = 0;
  interpreter_ticks = 0;
  for (int ut = 0; ut < ut_end; ut += 1) {
    unknown_ticks_array[ut] = 0;
  }
  region_flag[ThreadProfilerMark::classLoaderRegion] = false;
  class_loader_ticks = 0;
  region_flag[ThreadProfilerMark::extraRegion] = false;
  extra_ticks = 0;
  timer.start();
  interval_data_ref()->reset();
}

void ThreadProfiler::reset() {
  timer.stop();
  if (table != NULL) {
    for (int index = 0; index < table_size; index++) {
      ProfilerNode* n = table[index];
      if (n != NULL) {
        delete n;
      }
    }
  }
  initialize();
}

void FlatProfiler::allocate_table() {
  { // Bytecode table
    bytecode_ticks = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes);
    bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes);
    for(int index = 0; index < Bytecodes::number_of_codes; index++) {
      bytecode_ticks[index] = 0;
      bytecode_ticks_stub[index] = 0;
    }
  }

  if (ProfilerRecordPC) PCRecorder::init();

  interval_data = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size);
  FlatProfiler::interval_reset();
}

void FlatProfiler::engage(JavaThread* mainThread, bool fullProfile) {
  full_profile_flag = fullProfile;
  if (bytecode_ticks == NULL) {
    allocate_table();
  }
  if(ProfileVM && (vm_thread_profiler == NULL)){
    vm_thread_profiler = new ThreadProfiler();
  }
  if (task == NULL) {
    task = new FlatProfilerTask(WatcherThread::delay_interval);
    task->enroll();
  }
  timer.start();
  if (mainThread != NULL) {
    // When mainThread was created, it might not have a ThreadProfiler
    ThreadProfiler* pp = mainThread->get_thread_profiler();
    if (pp == NULL) {
      mainThread->set_thread_profiler(new ThreadProfiler());
    } else {
      pp->reset();
    }
    mainThread->get_thread_profiler()->engage();
  }
  // This is where we would assign thread_profiler
  // if we wanted only one thread_profiler for all threads.
  thread_profiler = NULL;
}

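// Usage sketch (added note, not part of the original file): the profiler is
// normally wired up by the -Xprof launcher option, which the VM startup code
// translates into roughly the following calls around the application's
// lifetime (the exact call sites live outside this file):
//
//   FlatProfiler::engage(main_thread, /* fullProfile = */ true);  // at startup
//   ... run the application ...
//   FlatProfiler::disengage();                                    // at exit
//   FlatProfiler::print(0);                                       // emit the report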
void FlatProfiler::disengage() {
  if (!task) {
    return;
  }
  timer.stop();
  task->disenroll();
  delete task;
  task = NULL;
  if (thread_profiler != NULL) {
    thread_profiler->disengage();
  } else {
    MutexLocker tl(Threads_lock);
    for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
      ThreadProfiler* pp = tp->get_thread_profiler();
      if (pp != NULL) {
        pp->disengage();
      }
    }
  }
}

void FlatProfiler::reset() {
  if (task) {
    disengage();
  }

  class_loader_ticks = 0;
  extra_ticks = 0;
  received_gc_ticks = 0;
  vm_operation_ticks = 0;
  compiler_ticks = 0;
  deopt_ticks = 0;
  interpreter_ticks = 0;
  blocked_ticks = 0;
  unknown_ticks = 0;
  received_ticks = 0;
  delivered_ticks = 0;
  timer.stop();
}

bool FlatProfiler::is_active() {
  return task != NULL;
}

void FlatProfiler::print_byte_code_statistics() {
  GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);

  tty->print_cr(" Bytecode ticks:");
  for (int index = 0; index < Bytecodes::number_of_codes; index++) {
    if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) {
      tty->print_cr(" %4d %4d = %s",
        FlatProfiler::bytecode_ticks[index],
        FlatProfiler::bytecode_ticks_stub[index],
        Bytecodes::name( (Bytecodes::Code) index));
    }
  }
  tty->cr();
}

void print_ticks(const char* title, int ticks, int total) {
  if (ticks > 0) {
    tty->print("%5.1f%% %5d", ticks * 100.0 / total, ticks);
    tty->fill_to(col3);
    tty->print("%s", title);
    tty->cr();
  }
}

void ThreadProfiler::print(const char* thread_name) {
  ResourceMark rm;
  MutexLocker ppl(ProfilePrint_lock);
  int index = 0; // Declared outside for loops for portability

  if (table == NULL) {
    return;
  }

  if (thread_ticks <= 0) {
    return;
  }

  const char* title = "too soon to tell";
  double secs = timer.seconds();

  GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
  for(index = 0; index < table_size; index++) {
    for(ProfilerNode* node = table[index]; node; node = node->next())
      array->append(node);
  }

  array->sort(&ProfilerNode::compare);

  // compute total (sanity check)
  int active =
    class_loader_ticks +
    compiler_ticks +
    interpreter_ticks +
    unknown_ticks();
  for (index = 0; index < array->length(); index++) {
    active += array->at(index)->ticks.total();
  }
  int total = active + blocked_ticks;

  tty->cr();
  tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", secs, total, thread_name);
  if (total != thread_ticks) {
    print_ticks("Lost ticks", thread_ticks-total, thread_ticks);
  }
  tty->cr();

  // print interpreted methods
  tick_counter interpreted_ticks;
  bool has_interpreted_ticks = false;
  int print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_interpreted()) {
      interpreted_ticks.add(&n->ticks);
      if (!has_interpreted_ticks) {
        interpretedNode::print_title(tty);
        has_interpreted_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfInterpretedMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_interpreted_ticks) {
    if (print_count <= ProfilerNumberOfInterpretedMethods) {
      title = "Total interpreted";
    } else {
      title = "Total interpreted (including elided)";
    }
    interpretedNode::print_total(tty, &interpreted_ticks, active, title);
    tty->cr();
  }

  // print compiled methods
  tick_counter compiled_ticks;
  bool has_compiled_ticks = false;
  print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_compiled()) {
      compiled_ticks.add(&n->ticks);
      if (!has_compiled_ticks) {
        compiledNode::print_title(tty);
        has_compiled_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfCompiledMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_compiled_ticks) {
    if (print_count <= ProfilerNumberOfCompiledMethods) {
      title = "Total compiled";
    } else {
      title = "Total compiled (including elided)";
    }
    compiledNode::print_total(tty, &compiled_ticks, active, title);
    tty->cr();
  }

  // print stub methods
  tick_counter stub_ticks;
  bool has_stub_ticks = false;
  print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_stub()) {
      stub_ticks.add(&n->ticks);
      if (!has_stub_ticks) {
        stubNode::print_title(tty);
        has_stub_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfStubMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_stub_ticks) {
    if (print_count <= ProfilerNumberOfStubMethods) {
      title = "Total stub";
    } else {
      title = "Total stub (including elided)";
    }
    stubNode::print_total(tty, &stub_ticks, active, title);
    tty->cr();
  }

// print runtime stubs |
|
1432 |
tick_counter runtime_stub_ticks; |
|
1433 |
bool has_runtime_stub_ticks = false; |
|
1434 |
print_count = 0; |
|
1435 |
for (index = 0; index < array->length(); index++) { |
|
1436 |
ProfilerNode* n = array->at(index); |
|
1437 |
if (n->is_runtime_stub()) { |
|
1438 |
runtime_stub_ticks.add(&n->ticks); |
|
1439 |
if (!has_runtime_stub_ticks) { |
|
1440 |
runtimeStubNode::print_title(tty); |
|
1441 |
has_runtime_stub_ticks = true; |
|
1442 |
} |
|
1443 |
if (print_count++ < ProfilerNumberOfRuntimeStubNodes) { |
|
1444 |
n->print(tty, active); |
|
1445 |
} |
|
1446 |
} |
|
1447 |
} |
|
1448 |
if (has_runtime_stub_ticks) { |
|
1449 |
if (print_count <= ProfilerNumberOfRuntimeStubNodes) { |
|
1450 |
title = "Total runtime stubs"; |
|
1451 |
} else { |
|
1452 |
title = "Total runtime stubs (including elided)"; |
|
1453 |
} |
|
1454 |
runtimeStubNode::print_total(tty, &runtime_stub_ticks, active, title); |
|
1455 |
tty->cr(); |
|
1456 |
} |
|
1457 |
||
1458 |
if (blocked_ticks + class_loader_ticks + interpreter_ticks + compiler_ticks + unknown_ticks() != 0) { |
|
1459 |
tty->fill_to(col1); |
|
1460 |
tty->print_cr("Thread-local ticks:"); |
|
1461 |
print_ticks("Blocked (of total)", blocked_ticks, total); |
|
1462 |
print_ticks("Class loader", class_loader_ticks, active); |
|
1463 |
print_ticks("Extra", extra_ticks, active); |
|
1464 |
print_ticks("Interpreter", interpreter_ticks, active); |
|
1465 |
print_ticks("Compilation", compiler_ticks, active); |
|
1466 |
print_ticks("Unknown: vtable stubs", unknown_ticks_array[ut_vtable_stubs], active); |
|
1467 |
print_ticks("Unknown: null method", unknown_ticks_array[ut_null_method], active); |
|
1468 |
print_ticks("Unknown: running frame", unknown_ticks_array[ut_running_frame], active); |
|
1469 |
print_ticks("Unknown: calling frame", unknown_ticks_array[ut_calling_frame], active); |
|
1470 |
print_ticks("Unknown: no pc", unknown_ticks_array[ut_no_pc], active); |
|
1471 |
print_ticks("Unknown: no last frame", unknown_ticks_array[ut_no_last_Java_frame], active); |
|
1472 |
print_ticks("Unknown: thread_state", unknown_ticks_array[ut_unknown_thread_state], active); |
|
1473 |
tty->cr(); |
|
1474 |
} |
|
1475 |
||
1476 |
if (WizardMode) { |
|
1477 |
tty->print_cr("Node area used: %dKb", (area_top - area_bottom) / 1024); |
|
1478 |
} |
|
1479 |
reset(); |
|
1480 |
} |
|
1481 |
||
/*
ThreadProfiler::print_unknown(){
  if (table == NULL) {
    return;
  }

  if (thread_ticks <= 0) {
    return;
  }
} */

void FlatProfiler::print(int unused) {
  ResourceMark rm;
  if (thread_profiler != NULL) {
    thread_profiler->print("All threads");
  } else {
    MutexLocker tl(Threads_lock);
    for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
      ThreadProfiler* pp = tp->get_thread_profiler();
      if (pp != NULL) {
        pp->print(tp->get_thread_name());
      }
    }
  }

  if (ProfilerPrintByteCodeStatistics) {
    print_byte_code_statistics();
  }

  if (non_method_ticks() > 0) {
    tty->cr();
    tty->print_cr("Global summary of %3.2f seconds:", timer.seconds());
    print_ticks("Received ticks", received_ticks, received_ticks);
    print_ticks("Received GC ticks", received_gc_ticks, received_ticks);
    print_ticks("Compilation", compiler_ticks, received_ticks);
    print_ticks("Deoptimization", deopt_ticks, received_ticks);
    print_ticks("Other VM operations", vm_operation_ticks, received_ticks);
#ifndef PRODUCT
    print_ticks("Blocked ticks", blocked_ticks, received_ticks);
    print_ticks("Threads_lock blocks", threads_lock_ticks, received_ticks);
    print_ticks("Delivered ticks", delivered_ticks, received_ticks);
    print_ticks("All ticks", all_ticks, received_ticks);
#endif
    print_ticks("Class loader", class_loader_ticks, received_ticks);
    print_ticks("Extra ", extra_ticks, received_ticks);
    print_ticks("Interpreter", interpreter_ticks, received_ticks);
    print_ticks("Unknown code", unknown_ticks, received_ticks);
  }

  PCRecorder::print();

  if(ProfileVM){
    tty->cr();
    vm_thread_profiler->print("VM Thread");
  }
}

void IntervalData::print_header(outputStream* st) {
  st->print("i/c/n/g");
}

void IntervalData::print_data(outputStream* st) {
  st->print("%d/%d/%d/%d", interpreted(), compiled(), native(), compiling());
}

void FlatProfiler::interval_record_thread(ThreadProfiler* tp) {
  IntervalData id = tp->interval_data();
  int total = id.total();
  tp->interval_data_ref()->reset();

  // Insertion sort the data, if it's relevant.
  for (int i = 0; i < interval_print_size; i += 1) {
    if (total > interval_data[i].total()) {
      for (int j = interval_print_size - 1; j > i; j -= 1) {
        interval_data[j] = interval_data[j-1];
      }
      interval_data[i] = id;
      break;
    }
  }
}

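// Note (added comment): interval_record_thread() keeps the top
// interval_print_size IntervalData snapshots, ordered by total(), using a
// simple insertion step: find the first slot whose total is smaller, shift
// the tail down one position, and drop the new snapshot in.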
void FlatProfiler::interval_print() {
  if ((interval_data[0].total() > 0)) {
    tty->stamp();
    tty->print("\t");
    IntervalData::print_header(tty);
    for (int i = 0; i < interval_print_size; i += 1) {
      if (interval_data[i].total() > 0) {
        tty->print("\t");
        interval_data[i].print_data(tty);
      }
    }
    tty->cr();
  }
}

void FlatProfiler::interval_reset() {
  for (int i = 0; i < interval_print_size; i += 1) {
    interval_data[i].reset();
  }
}

void ThreadProfiler::oops_do(OopClosure* f) {
  if (table == NULL) return;

  for(int index = 0; index < table_size; index++) {
    for(ProfilerNode* node = table[index]; node; node = node->next())
      node->oops_do(f);
  }
}

void FlatProfiler::oops_do(OopClosure* f) {
  if (thread_profiler != NULL) {
    thread_profiler->oops_do(f);
  } else {
    for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
      ThreadProfiler* pp = tp->get_thread_profiler();
      if (pp != NULL) {
        pp->oops_do(f);
      }
    }
  }
}