changeset   47106:bed18a111b90
author      gziemski
date        Thu, 31 Aug 2017 20:26:53 -0500
parent      47104:6bdc0c9c44af
child       47107:8356043b90f0
--- a/hotspot/make/lib/JvmFeatures.gmk Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/make/lib/JvmFeatures.gmk Thu Aug 31 20:26:53 2017 -0500
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -88,11 +88,6 @@
   JVM_EXCLUDE_FILES += jvmciCodeInstaller_$(HOTSPOT_TARGET_CPU_ARCH).cpp
 endif

-ifneq ($(call check-jvm-feature, fprof), true)
-  JVM_CFLAGS_FEATURES += -DINCLUDE_FPROF=0
-  JVM_EXCLUDE_FILES += fprofiler.cpp
-endif
-
 ifneq ($(call check-jvm-feature, vm-structs), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_VM_STRUCTS=0
   JVM_EXCLUDE_FILES += vmStructs.cpp
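For context on the build change above: when check-jvm-feature reports a feature as disabled, JvmFeatures.gmk both excludes the feature's source files (JVM_EXCLUDE_FILES) and passes an INCLUDE_<FEATURE>=0 define (JVM_CFLAGS_FEATURES), which the C++ sources use to compile out any remaining references; with fprof gone, that hook disappears along with fprofiler.cpp. A minimal, hedged sketch of the source-side convention, using a hypothetical INCLUDE_EXAMPLE macro rather than anything defined in this changeset:

// Hedged illustration of the INCLUDE_<FEATURE> convention. INCLUDE_EXAMPLE and
// example_feature_init() are hypothetical names, not symbols from this change.

// Default to "enabled" unless the build passed -DINCLUDE_EXAMPLE=0
// (hotspot keeps such defaults in utilities/macros.hpp).
#ifndef INCLUDE_EXAMPLE
#define INCLUDE_EXAMPLE 1
#endif

#if INCLUDE_EXAMPLE
void example_feature_init() {
  // Real initialization, compiled only when the feature is built in.
}
#else
void example_feature_init() {
  // No-op stub so callers need no #if guards of their own.
}
#endif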
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -292,7 +292,7 @@
   if (VerifyThread) {
     // NOTE: this chops off the heads of the 64-bit O registers.
     // make sure G2_thread contains the right value
-    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
+    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod)
     mov(G1, L1);                // avoid clobbering G1
     // G2 saved below
     mov(G3, L3);                // avoid clobbering G3
@@ -398,7 +398,7 @@

 #ifdef ASSERT
   // check that it WAS previously set
-    save_frame_and_mov(0, Lmethod, Lmethod);   // Propagate Lmethod to helper frame for -Xprof
+    save_frame_and_mov(0, Lmethod, Lmethod);   // Propagate Lmethod to helper frame
     ld_ptr(sp_addr, L0);
     tst(L0);
     breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
@@ -618,7 +618,7 @@

 # ifdef ASSERT
     // Check that we are not overwriting any other oop.
-    save_frame_and_mov(0, Lmethod, Lmethod);   // Propagate Lmethod for -Xprof
+    save_frame_and_mov(0, Lmethod, Lmethod);   // Propagate Lmethod
     ld_ptr(vm_result_addr, L0);
     tst(L0);
     restore();
--- a/hotspot/src/os/aix/vm/os_aix.cpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/os/aix/vm/os_aix.cpp Thu Aug 31 20:26:53 2017 -0500 @@ -2607,11 +2607,10 @@ //////////////////////////////////////////////////////////////////////////////// // suspend/resume support -// the low-level signal-based suspend/resume support is a remnant from the +// The low-level signal-based suspend/resume support is a remnant from the // old VM-suspension that used to be for java-suspension, safepoints etc, -// within hotspot. Now there is a single use-case for this: -// - calling get_thread_pc() on the VMThread by the flat-profiler task -// that runs in the watcher thread. +// within hotspot. Currently used by JFR's OSThreadSampler +// // The remaining code is greatly simplified from the more general suspension // code that used to be used. // @@ -2627,7 +2626,13 @@ // // Note that the SR_lock plays no role in this suspend/resume protocol, // but is checked for NULL in SR_handler as a thread termination indicator. +// The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs. // +// Note that resume_clear_context() and suspend_save_context() are needed +// by SR_handler(), so that fetch_frame_from_ucontext() works, +// which in part is used by: +// - Forte Analyzer: AsyncGetCallTrace() +// - StackBanging: get_frame_at_stack_banging_point() static void resume_clear_context(OSThread *osthread) { osthread->set_ucontext(NULL); @@ -3634,44 +3639,6 @@ } } -class PcFetcher : public os::SuspendedThreadTask { -public: - PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} - ExtendedPC result(); -protected: - void do_task(const os::SuspendedThreadTaskContext& context); -private: - ExtendedPC _epc; -}; - -ExtendedPC PcFetcher::result() { - guarantee(is_done(), "task is not done yet."); - return _epc; -} - -void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { - Thread* thread = context.thread(); - OSThread* osthread = thread->osthread(); - if (osthread->ucontext() != NULL) { - _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext()); - } else { - // NULL context is unexpected, double-check this is the VMThread. - guarantee(thread->is_VM_thread(), "can only be called for VMThread"); - } -} - -// Suspends the target using the signal mechanism and then grabs the PC before -// resuming the target. Used by the flat-profiler only -ExtendedPC os::get_thread_pc(Thread* thread) { - // Make sure that it is called by the watcher for the VMThread. - assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); - assert(thread->is_VM_thread(), "Can only be called for VMThread"); - - PcFetcher fetcher(thread); - fetcher.run(); - return fetcher.result(); -} - //////////////////////////////////////////////////////////////////////////////// // debug support
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Thu Aug 31 20:26:53 2017 -0500 @@ -2603,11 +2603,10 @@ //////////////////////////////////////////////////////////////////////////////// // suspend/resume support -// the low-level signal-based suspend/resume support is a remnant from the +// The low-level signal-based suspend/resume support is a remnant from the // old VM-suspension that used to be for java-suspension, safepoints etc, -// within hotspot. Now there is a single use-case for this: -// - calling get_thread_pc() on the VMThread by the flat-profiler task -// that runs in the watcher thread. +// within hotspot. Currently used by JFR's OSThreadSampler +// // The remaining code is greatly simplified from the more general suspension // code that used to be used. // @@ -2623,6 +2622,13 @@ // // Note that the SR_lock plays no role in this suspend/resume protocol, // but is checked for NULL in SR_handler as a thread termination indicator. +// The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs. +// +// Note that resume_clear_context() and suspend_save_context() are needed +// by SR_handler(), so that fetch_frame_from_ucontext() works, +// which in part is used by: +// - Forte Analyzer: AsyncGetCallTrace() +// - StackBanging: get_frame_at_stack_banging_point() static void resume_clear_context(OSThread *osthread) { osthread->set_ucontext(NULL); @@ -3521,45 +3527,6 @@ } } -/// -class PcFetcher : public os::SuspendedThreadTask { - public: - PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} - ExtendedPC result(); - protected: - void do_task(const os::SuspendedThreadTaskContext& context); - private: - ExtendedPC _epc; -}; - -ExtendedPC PcFetcher::result() { - guarantee(is_done(), "task is not done yet."); - return _epc; -} - -void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { - Thread* thread = context.thread(); - OSThread* osthread = thread->osthread(); - if (osthread->ucontext() != NULL) { - _epc = os::Bsd::ucontext_get_pc((const ucontext_t *) context.ucontext()); - } else { - // NULL context is unexpected, double-check this is the VMThread - guarantee(thread->is_VM_thread(), "can only be called for VMThread"); - } -} - -// Suspends the target using the signal mechanism and then grabs the PC before -// resuming the target. Used by the flat-profiler only -ExtendedPC os::get_thread_pc(Thread* thread) { - // Make sure that it is called by the watcher for the VMThread - assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); - assert(thread->is_VM_thread(), "Can only be called for VMThread"); - - PcFetcher fetcher(thread); - fetcher.run(); - return fetcher.result(); -} - //////////////////////////////////////////////////////////////////////////////// // debug support
--- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/os/linux/vm/os_linux.cpp Thu Aug 31 20:26:53 2017 -0500 @@ -4000,11 +4000,10 @@ //////////////////////////////////////////////////////////////////////////////// // suspend/resume support -// the low-level signal-based suspend/resume support is a remnant from the +// The low-level signal-based suspend/resume support is a remnant from the // old VM-suspension that used to be for java-suspension, safepoints etc, -// within hotspot. Now there is a single use-case for this: -// - calling get_thread_pc() on the VMThread by the flat-profiler task -// that runs in the watcher thread. +// within hotspot. Currently used by JFR's OSThreadSampler +// // The remaining code is greatly simplified from the more general suspension // code that used to be used. // @@ -4020,6 +4019,13 @@ // // Note that the SR_lock plays no role in this suspend/resume protocol, // but is checked for NULL in SR_handler as a thread termination indicator. +// The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs. +// +// Note that resume_clear_context() and suspend_save_context() are needed +// by SR_handler(), so that fetch_frame_from_ucontext() works, +// which in part is used by: +// - Forte Analyzer: AsyncGetCallTrace() +// - StackBanging: get_frame_at_stack_banging_point() static void resume_clear_context(OSThread *osthread) { osthread->set_ucontext(NULL); @@ -5060,44 +5066,6 @@ } } -class PcFetcher : public os::SuspendedThreadTask { - public: - PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} - ExtendedPC result(); - protected: - void do_task(const os::SuspendedThreadTaskContext& context); - private: - ExtendedPC _epc; -}; - -ExtendedPC PcFetcher::result() { - guarantee(is_done(), "task is not done yet."); - return _epc; -} - -void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { - Thread* thread = context.thread(); - OSThread* osthread = thread->osthread(); - if (osthread->ucontext() != NULL) { - _epc = os::Linux::ucontext_get_pc((const ucontext_t *) context.ucontext()); - } else { - // NULL context is unexpected, double-check this is the VMThread - guarantee(thread->is_VM_thread(), "can only be called for VMThread"); - } -} - -// Suspends the target using the signal mechanism and then grabs the PC before -// resuming the target. Used by the flat-profiler only -ExtendedPC os::get_thread_pc(Thread* thread) { - // Make sure that it is called by the watcher for the VMThread - assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); - assert(thread->is_VM_thread(), "Can only be called for VMThread"); - - PcFetcher fetcher(thread); - fetcher.run(); - return fetcher.result(); -} - //////////////////////////////////////////////////////////////////////////////// // debug support
--- a/hotspot/src/os/solaris/vm/osThread_solaris.hpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/os/solaris/vm/osThread_solaris.hpp Thu Aug 31 20:26:53 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,12 +65,6 @@
   void set_lwp_id(uint id) { _lwp_id = id; }
   void set_native_priority(int prio) { _native_priority = prio; }

-  // ***************************************************************
-  // interrupt support. interrupts (using signals) are used to get
-  // the thread context (get_thread_pc), to set the thread context
-  // (set_thread_pc), and to implement java.lang.Thread.interrupt.
-  // ***************************************************************
-
  public:
   os::SuspendResume sr;

--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Aug 31 20:26:53 2017 -0500 @@ -3442,6 +3442,37 @@ schedctl_start(schedctl_init()); } +//////////////////////////////////////////////////////////////////////////////// +// suspend/resume support + +// The low-level signal-based suspend/resume support is a remnant from the +// old VM-suspension that used to be for java-suspension, safepoints etc, +// within hotspot. Currently used by JFR's OSThreadSampler +// +// The remaining code is greatly simplified from the more general suspension +// code that used to be used. +// +// The protocol is quite simple: +// - suspend: +// - sends a signal to the target thread +// - polls the suspend state of the osthread using a yield loop +// - target thread signal handler (SR_handler) sets suspend state +// and blocks in sigsuspend until continued +// - resume: +// - sets target osthread state to continue +// - sends signal to end the sigsuspend loop in the SR_handler +// +// Note that the SR_lock plays no role in this suspend/resume protocol, +// but is checked for NULL in SR_handler as a thread termination indicator. +// The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs. +// +// Note that resume_clear_context() and suspend_save_context() are needed +// by SR_handler(), so that fetch_frame_from_ucontext() works, +// which in part is used by: +// - Forte Analyzer: AsyncGetCallTrace() +// - StackBanging: get_frame_at_stack_banging_point() +// - JFR: get_topframe()-->....-->get_valid_uc_in_signal_handler() + static void resume_clear_context(OSThread *osthread) { osthread->set_ucontext(NULL); } @@ -3452,7 +3483,7 @@ static PosixSemaphore sr_semaphore; -void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) { +void os::Solaris::SR_handler(Thread* thread, ucontext_t* context) { // Save and restore errno to avoid confusing native code with EINTR // after sigsuspend. int old_errno = errno; @@ -3462,7 +3493,7 @@ os::SuspendResume::State current = osthread->sr.state(); if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { - suspend_save_context(osthread, uc); + suspend_save_context(osthread, context); // attempt to switch the state, we assume we had a SUSPEND_REQUEST os::SuspendResume::State state = osthread->sr.suspended(); @@ -3609,45 +3640,6 @@ } } -class PcFetcher : public os::SuspendedThreadTask { - public: - PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} - ExtendedPC result(); - protected: - void do_task(const os::SuspendedThreadTaskContext& context); - private: - ExtendedPC _epc; -}; - -ExtendedPC PcFetcher::result() { - guarantee(is_done(), "task is not done yet."); - return _epc; -} - -void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { - Thread* thread = context.thread(); - OSThread* osthread = thread->osthread(); - if (osthread->ucontext() != NULL) { - _epc = os::Solaris::ucontext_get_pc((const ucontext_t *) context.ucontext()); - } else { - // NULL context is unexpected, double-check this is the VMThread - guarantee(thread->is_VM_thread(), "can only be called for VMThread"); - } -} - -// A lightweight implementation that does not suspend the target thread and -// thus returns only a hint. Used for profiling only! -ExtendedPC os::get_thread_pc(Thread* thread) { - // Make sure that it is called by the watcher and the Threads lock is owned. 
- assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock"); - // For now, is only used to profile the VM Thread - assert(thread->is_VM_thread(), "Can only be called for VMThread"); - PcFetcher fetcher(thread); - fetcher.run(); - return fetcher.result(); -} - - // This does not do anything on Solaris. This is basically a hook for being // able to use structured exception handling (thread-local exception filters) on, e.g., Win32. void os::os_exception_wrapper(java_call_t f, JavaValue* value,
--- a/hotspot/src/os/windows/vm/os_windows.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -3506,22 +3506,6 @@
   return interrupted;
 }

-// Get's a pc (hint) for a running thread. Currently used only for profiling.
-ExtendedPC os::get_thread_pc(Thread* thread) {
-  CONTEXT context;
-  context.ContextFlags = CONTEXT_CONTROL;
-  HANDLE handle = thread->osthread()->thread_handle();
-  if (GetThreadContext(handle, &context)) {
-#ifdef _M_AMD64
-    return ExtendedPC((address) context.Rip);
-#else
-    return ExtendedPC((address) context.Eip);
-#endif
-  } else {
-    return ExtendedPC(NULL);
-  }
-}
-
 // GetCurrentThreadId() returns DWORD
 intx os::current_thread_id()  { return GetCurrentThreadId(); }

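The os::get_thread_pc() removals above (AIX, BSD, Linux, Solaris, Windows) delete the last PcFetcher users, while the os::SuspendedThreadTask machinery and the signal-based suspend/resume protocol remain; per the updated comments they are still needed by JFR's OSThreadSampler and by the fetch_frame_from_ucontext() consumers (AsyncGetCallTrace, stack banging). A hedged sketch of how such a consumer is shaped, modeled directly on the removed Linux PcFetcher; the class name TargetPcSampler is illustrative, not part of this change:

// Illustrative only: mirrors the removed PcFetcher and assumes hotspot's
// os.hpp / os_linux declarations; TargetPcSampler is a hypothetical name.
class TargetPcSampler : public os::SuspendedThreadTask {
 public:
  TargetPcSampler(Thread* thread) : os::SuspendedThreadTask(thread) {}

  ExtendedPC result() {
    guarantee(is_done(), "task is not done yet.");
    return _epc;
  }

 protected:
  // Runs while the target is stopped by the SR_handler-based suspend/resume
  // protocol; the ucontext was stashed by suspend_save_context().
  void do_task(const os::SuspendedThreadTaskContext& context) {
    OSThread* osthread = context.thread()->osthread();
    if (osthread->ucontext() != NULL) {
      _epc = os::Linux::ucontext_get_pc((const ucontext_t*) context.ucontext());
    }
  }

 private:
  ExtendedPC _epc;
};

// Usage sketch: suspend the target, capture its pc, resume it.
//   TargetPcSampler sampler(target_thread);
//   sampler.run();
//   ExtendedPC pc = sampler.result();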
--- a/hotspot/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp Thu Aug 31 20:26:53 2017 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2013 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,7 +29,6 @@ private: void pd_initialize() { _anchor.clear(); - _last_interpreter_fp = NULL; } // The `last' frame is the youngest Java frame on the thread's stack. @@ -60,20 +59,4 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); - // -Xprof support - // - // In order to find the last Java fp from an async profile - // tick, we store the current interpreter fp in the thread. - // This value is only valid while we are in the C++ interpreter - // and profiling. - protected: - intptr_t *_last_interpreter_fp; - - public: - static ByteSize last_interpreter_fp_offset() { - return byte_offset_of(JavaThread, _last_interpreter_fp); - } - - intptr_t* last_interpreter_fp() { return _last_interpreter_fp; } - #endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
--- a/hotspot/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp Thu Aug 31 20:26:53 2017 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2013 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -30,7 +30,6 @@ void pd_initialize() { _anchor.clear(); - _last_interpreter_fp = NULL; } // The `last' frame is the youngest Java frame on the thread's stack. @@ -62,22 +61,4 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); - protected: - - // -Xprof support - // - // In order to find the last Java fp from an async profile - // tick, we store the current interpreter fp in the thread. - // This value is only valid while we are in the C++ interpreter - // and profiling. - intptr_t *_last_interpreter_fp; - - public: - - static ByteSize last_interpreter_fp_offset() { - return byte_offset_of(JavaThread, _last_interpreter_fp); - } - - intptr_t* last_interpreter_fp() { return _last_interpreter_fp; } - #endif // OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
--- a/hotspot/src/os_cpu/linux_s390/vm/thread_linux_s390.hpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/os_cpu/linux_s390/vm/thread_linux_s390.hpp Thu Aug 31 20:26:53 2017 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -30,7 +30,6 @@ void pd_initialize() { _anchor.clear(); - _last_interpreter_fp = NULL; } // The `last' frame is the youngest Java frame on the thread's stack. @@ -61,22 +60,4 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); - protected: - - // -Xprof support - // - // In order to find the last Java fp from an async profile - // tick, we store the current interpreter fp in the thread. - // This value is only valid while we are in the C++ interpreter - // and profiling. - intptr_t *_last_interpreter_fp; - - public: - - static ByteSize last_interpreter_fp_offset() { - return byte_offset_of(JavaThread, _last_interpreter_fp); - } - - intptr_t* last_interpreter_fp() { return _last_interpreter_fp; } - #endif // OS_CPU_LINUX_S390_VM_THREAD_LINUX_S390_HPP
--- a/hotspot/src/share/vm/Xusage.txt Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/Xusage.txt Thu Aug 31 20:26:53 2017 -0500
@@ -12,7 +12,6 @@
     -Xms<size>        set initial Java heap size
     -Xmx<size>        set maximum Java heap size
     -Xss<size>        set java thread stack size
-    -Xprof            output cpu profiling data (deprecated)
     -Xfuture          enable strictest checks, anticipating future default
     -Xrs              reduce use of OS signals by Java/VM (see documentation)
     -Xcheck:jni       perform additional checks for JNI functions
--- a/hotspot/src/share/vm/classfile/classLoader.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -57,7 +57,6 @@
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/compilationPolicy.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
@@ -1442,7 +1441,6 @@
   const char* const class_name = name->as_C_string();

   EventMark m("loading class %s", class_name);
-  ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);

   const char* const file_name = file_name_for_class_name(class_name,
                                                          name->utf8_length());
--- a/hotspot/src/share/vm/compiler/disassembler.cpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/share/vm/compiler/disassembler.cpp Thu Aug 31 20:26:53 2017 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,6 @@ #include "gc/shared/collectedHeap.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" -#include "runtime/fprofiler.hpp" #include "runtime/handles.inline.hpp" #include "runtime/os.hpp" #include "runtime/stubCodeGenerator.hpp" @@ -163,7 +162,6 @@ bool _print_pc; bool _print_bytes; address _cur_insn; - int _total_ticks; int _bytes_per_line; // arch-specific formatting option static bool match(const char* event, const char* tag) { @@ -213,18 +211,6 @@ _nm->print_code_comment_on(st, COMMENT_COLUMN, pc0, pc); // this calls reloc_string_for which calls oop::print_value_on } - - // Output pc bucket ticks if we have any - if (total_ticks() != 0) { - address bucket_pc = FlatProfiler::bucket_start_for(pc); - if (bucket_pc != NULL && bucket_pc > pc0 && bucket_pc <= pc) { - int bucket_count = FlatProfiler::bucket_count_for(pc0); - if (bucket_count != 0) { - st->bol(); - st->print_cr("%3.1f%% [%d]", bucket_count*100.0/total_ticks(), bucket_count); - } - } - } // follow each complete insn by a nice newline st->cr(); } @@ -233,8 +219,6 @@ outputStream* output() { return _output; } address cur_insn() { return _cur_insn; } - int total_ticks() { return _total_ticks; } - void set_total_ticks(int n) { _total_ticks = n; } const char* options() { return _option_buf; } }; @@ -561,20 +545,6 @@ #endif env.output()->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT "] " JLONG_FORMAT " bytes", p2i(p), p2i(end), ((jlong)(end - p))); - // If there has been profiling, print the buckets. - if (FlatProfiler::bucket_start_for(p) != NULL) { - unsigned char* p1 = p; - int total_bucket_count = 0; - while (p1 < end) { - unsigned char* p0 = p1; - p1 += pd_instruction_alignment(); - address bucket_pc = FlatProfiler::bucket_start_for(p1); - if (bucket_pc != NULL && bucket_pc > p0 && bucket_pc <= p1) - total_bucket_count += FlatProfiler::bucket_count_for(p0); - } - env.set_total_ticks(total_bucket_count); - } - // Print constant table. if (nm->consts_size() > 0) { nm->print_nmethod_labels(env.output(), nm->consts_begin());
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -54,7 +54,6 @@
   _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
   _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
   _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
-  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms):");
   _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
   _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
   _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp Thu Aug 31 20:26:53 2017 -0500
@@ -49,7 +49,6 @@
     UniverseRoots,
     JNIRoots,
     ObjectSynchronizerRoots,
-    FlatProfilerRoots,
     ManagementRoots,
     SystemDictionaryRoots,
     CLDGRoots,
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -48,7 +48,6 @@
 #include "prims/jvmtiExport.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "memory/allocation.inline.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/mutex.hpp"
 #include "services/management.hpp"
 #include "utilities/macros.hpp"
@@ -272,13 +271,6 @@
   }

   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::FlatProfilerRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_FlatProfiler_oops_do)) {
-      FlatProfiler::oops_do(strong_roots);
-    }
-  }
-
-  {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
     if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
       Management::oops_do(strong_roots);
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp Thu Aug 31 20:26:53 2017 -0500
@@ -57,7 +57,6 @@
     G1RP_PS_Universe_oops_do,
     G1RP_PS_JNIHandles_oops_do,
     G1RP_PS_ObjectSynchronizer_oops_do,
-    G1RP_PS_FlatProfiler_oops_do,
     G1RP_PS_Management_oops_do,
     G1RP_PS_SystemDictionary_oops_do,
     G1RP_PS_ClassLoaderDataGraph_oops_do,
--- a/hotspot/src/share/vm/gc/parallel/pcTasks.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/parallel/pcTasks.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,6 @@
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
@@ -105,10 +104,6 @@
       ObjectSynchronizer::oops_do(&mark_and_push_closure);
       break;

-    case flat_profiler:
-      FlatProfiler::oops_do(&mark_and_push_closure);
-      break;
-
     case management:
       Management::oops_do(&mark_and_push_closure);
       break;
--- a/hotspot/src/share/vm/gc/parallel/pcTasks.hpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/parallel/pcTasks.hpp Thu Aug 31 20:26:53 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,12 +94,11 @@
     jni_handles         = 2,
     threads             = 3,
     object_synchronizer = 4,
-    flat_profiler       = 5,
-    management          = 6,
-    jvmti               = 7,
-    system_dictionary   = 8,
-    class_loader_data   = 9,
-    code_cache          = 10
+    management          = 5,
+    jvmti               = 6,
+    system_dictionary   = 7,
+    class_loader_data   = 8,
+    code_cache          = 9
   };
  private:
   RootType _root_type;
--- a/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -50,7 +50,6 @@
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
@@ -514,7 +513,6 @@
     MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
     Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
     ObjectSynchronizer::oops_do(mark_and_push_closure());
-    FlatProfiler::oops_do(mark_and_push_closure());
     Management::oops_do(mark_and_push_closure());
     JvmtiExport::oops_do(mark_and_push_closure());
     SystemDictionary::always_strong_oops_do(mark_and_push_closure());
@@ -607,7 +605,6 @@
   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
   Threads::oops_do(adjust_pointer_closure(), NULL);
   ObjectSynchronizer::oops_do(adjust_pointer_closure());
-  FlatProfiler::oops_do(adjust_pointer_closure());
   Management::oops_do(adjust_pointer_closure());
   JvmtiExport::oops_do(adjust_pointer_closure());
   SystemDictionary::oops_do(adjust_pointer_closure());
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -60,7 +60,6 @@
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
@@ -2086,7 +2085,6 @@
   // We scan the thread roots in parallel
   Threads::create_thread_roots_marking_tasks(q);
   q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
-  q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
   q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
   q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
   q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
@@ -2169,7 +2167,6 @@
   JNIHandles::oops_do(&oop_closure);   // Global (strong) JNI handles
   Threads::oops_do(&oop_closure, NULL);
   ObjectSynchronizer::oops_do(&oop_closure);
-  FlatProfiler::oops_do(&oop_closure);
   Management::oops_do(&oop_closure);
   JvmtiExport::oops_do(&oop_closure);
   SystemDictionary::oops_do(&oop_closure);
--- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -49,7 +49,6 @@
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/vmThread.hpp"
@@ -381,7 +380,6 @@
       // We scan the thread roots in parallel
       Threads::create_thread_roots_tasks(q);
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
-      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
--- a/hotspot/src/share/vm/gc/parallel/psTasks.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/parallel/psTasks.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
@@ -74,10 +73,6 @@
       ObjectSynchronizer::oops_do(&roots_closure);
       break;

-    case flat_profiler:
-      FlatProfiler::oops_do(&roots_closure);
-      break;
-
     case system_dictionary:
       SystemDictionary::oops_do(&roots_closure);
       break;
--- a/hotspot/src/share/vm/gc/parallel/psTasks.hpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/parallel/psTasks.hpp Thu Aug 31 20:26:53 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,12 +57,11 @@
     jni_handles         = 2,
     threads             = 3,
     object_synchronizer = 4,
-    flat_profiler       = 5,
-    system_dictionary   = 6,
-    class_loader_data   = 7,
-    management          = 8,
-    jvmti               = 9,
-    code_cache          = 10
+    system_dictionary   = 5,
+    class_loader_data   = 6,
+    management          = 7,
+    jvmti               = 8,
+    code_cache          = 9
   };
  private:
   RootType _root_type;
--- a/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -46,7 +46,6 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -47,7 +47,6 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
@@ -71,7 +70,6 @@
   GCH_PS_Universe_oops_do,
   GCH_PS_JNIHandles_oops_do,
   GCH_PS_ObjectSynchronizer_oops_do,
-  GCH_PS_FlatProfiler_oops_do,
   GCH_PS_Management_oops_do,
   GCH_PS_SystemDictionary_oops_do,
   GCH_PS_ClassLoaderDataGraph_oops_do,
@@ -606,9 +604,6 @@
   if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
     ObjectSynchronizer::oops_do(strong_roots);
   }
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
-    FlatProfiler::oops_do(strong_roots);
-  }
   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
     Management::oops_do(strong_roots);
   }
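The surviving genCollectedHeap code above shows the claim-once pattern used for every strong-root group: each group has a GCH_PS_* task id, and only the first GC worker to claim that id scans the group. A hedged sketch of the shape, with placeholder names (ExampleSubsystem and GCH_PS_Example_oops_do are illustrative, not from this changeset):

// Illustrative only; mirrors the is_task_claimed() blocks visible above and
// assumes the surrounding GenCollectedHeap::process_roots() context.
if (!_process_strong_tasks->is_task_claimed(GCH_PS_Example_oops_do)) {
  // Whichever worker claims the task first scans this root group once;
  // all other workers see it as claimed and skip it.
  ExampleSubsystem::oops_do(strong_roots);
}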
--- a/hotspot/src/share/vm/memory/universe.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/memory/universe.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -63,7 +63,6 @@
 #include "runtime/atomic.hpp"
 #include "runtime/commandLineFlagConstraintList.hpp"
 #include "runtime/deoptimization.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
--- a/hotspot/src/share/vm/opto/runtime.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/opto/runtime.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -61,7 +61,6 @@
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
--- a/hotspot/src/share/vm/prims/jni.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/prims/jni.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -62,7 +62,6 @@
 #include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/fieldDescriptor.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/java.hpp"
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Thu Aug 31 20:26:53 2017 -0500
@@ -78,7 +78,6 @@
 char* Arguments::_java_command = NULL;
 SystemProperty* Arguments::_system_properties = NULL;
 const char* Arguments::_gc_log_filename = NULL;
-bool Arguments::_has_profile = false;
 size_t Arguments::_conservative_max_heap_alignment = 0;
 size_t Arguments::_min_heap_size = 0;
 Arguments::Mode Arguments::_mode = _mixed;
@@ -3160,16 +3159,12 @@
       if (FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true) != Flag::SUCCESS) {
         return JNI_EINVAL;
       }
-      // -Xprof
+    // -Xprof
     } else if (match_option(option, "-Xprof")) {
-#if INCLUDE_FPROF
-      log_warning(arguments)("Option -Xprof was deprecated in version 9 and will likely be removed in a future release.");
-      _has_profile = true;
-#else // INCLUDE_FPROF
-      jio_fprintf(defaultStream::error_stream(),
-        "Flat profiling is not supported in this VM.\n");
-      return JNI_ERR;
-#endif // INCLUDE_FPROF
+      char version[256];
+      // Obsolete in JDK 10
+      JDK_Version::jdk(10).to_string(version, sizeof(version));
+      warning("Ignoring option %s; support was removed in %s", option->optionString, version);
     // -Xconcurrentio
     } else if (match_option(option, "-Xconcurrentio")) {
       if (FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true) != Flag::SUCCESS) {
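With this change -Xprof is accepted but ignored: startup continues and the VM emits a warning built from the format string above, roughly "Ignoring option -Xprof; support was removed in <version>", where the version text comes from JDK_Version::jdk(10).to_string(). Previously the option either enabled the flat profiler (in builds with INCLUDE_FPROF) or failed startup with JNI_ERR.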
--- a/hotspot/src/share/vm/runtime/arguments.hpp Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/runtime/arguments.hpp Thu Aug 31 20:26:53 2017 -0500
@@ -412,7 +412,6 @@
   static bool _sun_java_launcher_is_altjvm;

   // Option flags
-  static bool _has_profile;
   static const char* _gc_log_filename;
   // Value of the conservative maximum heap alignment needed
   static size_t _conservative_max_heap_alignment;
@@ -696,9 +695,6 @@
   // -Dsun.java.launcher.pid
   static int sun_java_launcher_pid() { return _sun_java_launcher_pid; }

-  // -Xprof
-  static bool has_profile() { return _has_profile; }
-
   // -Xms
   static size_t min_heap_size() { return _min_heap_size; }
   static void set_min_heap_size(size_t v) { _min_heap_size = v; }
--- a/hotspot/src/share/vm/runtime/fprofiler.cpp Thu Aug 31 17:06:10 2017 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1623 +0,0 @@ -/* - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "classfile/classLoader.hpp" -#include "code/codeCache.hpp" -#include "code/vtableStubs.hpp" -#include "gc/shared/collectedHeap.inline.hpp" -#include "interpreter/interpreter.hpp" -#include "memory/allocation.inline.hpp" -#include "memory/resourceArea.hpp" -#include "memory/universe.inline.hpp" -#include "oops/oop.inline.hpp" -#include "oops/symbol.hpp" -#include "runtime/deoptimization.hpp" -#include "runtime/fprofiler.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/stubCodeGenerator.hpp" -#include "runtime/stubRoutines.hpp" -#include "runtime/task.hpp" -#include "runtime/thread.inline.hpp" -#include "runtime/vframe.hpp" -#include "utilities/macros.hpp" - -// Static fields of FlatProfiler -int FlatProfiler::received_gc_ticks = 0; -int FlatProfiler::vm_operation_ticks = 0; -int FlatProfiler::threads_lock_ticks = 0; -int FlatProfiler::class_loader_ticks = 0; -int FlatProfiler::extra_ticks = 0; -int FlatProfiler::blocked_ticks = 0; -int FlatProfiler::deopt_ticks = 0; -int FlatProfiler::unknown_ticks = 0; -int FlatProfiler::interpreter_ticks = 0; -int FlatProfiler::compiler_ticks = 0; -int FlatProfiler::received_ticks = 0; -int FlatProfiler::delivered_ticks = 0; -int* FlatProfiler::bytecode_ticks = NULL; -int* FlatProfiler::bytecode_ticks_stub = NULL; -int FlatProfiler::all_int_ticks = 0; -int FlatProfiler::all_comp_ticks = 0; -int FlatProfiler::all_ticks = 0; -bool FlatProfiler::full_profile_flag = false; -ThreadProfiler* FlatProfiler::thread_profiler = NULL; -ThreadProfiler* FlatProfiler::vm_thread_profiler = NULL; -FlatProfilerTask* FlatProfiler::task = NULL; -elapsedTimer FlatProfiler::timer; -int FlatProfiler::interval_ticks_previous = 0; -IntervalData* FlatProfiler::interval_data = NULL; - -ThreadProfiler::ThreadProfiler() { - // Space for the ProfilerNodes - const int area_size = 1 * ProfilerNodeSize * 1024; - area_bottom = AllocateHeap(area_size, mtInternal); - area_top = area_bottom; - area_limit = area_bottom + area_size; - - // ProfilerNode pointer table - table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size, mtInternal); - initialize(); - engaged = false; -} - -ThreadProfiler::~ThreadProfiler() { - FreeHeap(area_bottom); - area_bottom = NULL; - area_top = NULL; - area_limit = NULL; - FreeHeap(table); - table = NULL; -} - -// 
Statics for ThreadProfiler -int ThreadProfiler::table_size = 1024; - -int ThreadProfiler::entry(int value) { - value = (value > 0) ? value : -value; - return value % table_size; -} - -ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) { - _r = r; - _pp = NULL; - assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds"); - Thread* tp = Thread::current(); - if (tp != NULL && tp->is_Java_thread()) { - JavaThread* jtp = (JavaThread*) tp; - ThreadProfiler* pp = jtp->get_thread_profiler(); - _pp = pp; - if (pp != NULL) { - pp->region_flag[r] = true; - } - } -} - -ThreadProfilerMark::~ThreadProfilerMark() { - if (_pp != NULL) { - _pp->region_flag[_r] = false; - } - _pp = NULL; -} - -// Random other statics -static const int col1 = 2; // position of output column 1 -static const int col2 = 11; // position of output column 2 -static const int col3 = 25; // position of output column 3 -static const int col4 = 55; // position of output column 4 - - -// Used for detailed profiling of nmethods. -class PCRecorder : AllStatic { - private: - static int* counters; - static address base; - enum { - bucket_size = 16 - }; - static int index_for(address pc) { return (pc - base)/bucket_size; } - static address pc_for(int index) { return base + (index * bucket_size); } - static int size() { - return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord; - } - public: - static address bucket_start_for(address pc) { - if (counters == NULL) return NULL; - return pc_for(index_for(pc)); - } - static int bucket_count_for(address pc) { return counters[index_for(pc)]; } - static void init(); - static void record(address pc); - static void print(); - static void print_blobs(CodeBlob* cb); -}; - -int* PCRecorder::counters = NULL; -address PCRecorder::base = NULL; - -void PCRecorder::init() { - MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag); - int s = size(); - counters = NEW_C_HEAP_ARRAY(int, s, mtInternal); - for (int index = 0; index < s; index++) { - counters[index] = 0; - } - base = CodeCache::low_bound(); -} - -void PCRecorder::record(address pc) { - if (counters == NULL) return; - assert(CodeCache::contains(pc), "must be in CodeCache"); - counters[index_for(pc)]++; -} - - -address FlatProfiler::bucket_start_for(address pc) { - return PCRecorder::bucket_start_for(pc); -} - -int FlatProfiler::bucket_count_for(address pc) { - return PCRecorder::bucket_count_for(pc); -} - -void PCRecorder::print() { - if (counters == NULL) return; - - tty->cr(); - tty->print_cr("Printing compiled methods with PC buckets having more than " INTX_FORMAT " ticks", ProfilerPCTickThreshold); - tty->print_cr("==================================================================="); - tty->cr(); - - GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20); - - - int s; - { - MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag); - s = size(); - } - - for (int index = 0; index < s; index++) { - int count = counters[index]; - if (count > ProfilerPCTickThreshold) { - address pc = pc_for(index); - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); - if (cb != NULL && candidates->find(cb) < 0) { - candidates->push(cb); - } - } - } - for (int i = 0; i < candidates->length(); i++) { - print_blobs(candidates->at(i)); - } -} - -void PCRecorder::print_blobs(CodeBlob* cb) { - if (cb != NULL) { - cb->print(); - if (cb->is_nmethod()) { - ((nmethod*)cb)->print_code(); - } - tty->cr(); - } else { - tty->print_cr("stub code"); - 
} -} - -class tick_counter { // holds tick info for one node - public: - int ticks_in_code; - int ticks_in_native; - - tick_counter() { ticks_in_code = ticks_in_native = 0; } - tick_counter(int code, int native) { ticks_in_code = code; ticks_in_native = native; } - - int total() const { - return (ticks_in_code + ticks_in_native); - } - - void add(tick_counter* a) { - ticks_in_code += a->ticks_in_code; - ticks_in_native += a->ticks_in_native; - } - - void update(TickPosition where) { - switch(where) { - case tp_code: ticks_in_code++; break; - case tp_native: ticks_in_native++; break; - } - } - - void print_code(outputStream* st, int total_ticks) { - st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code); - } - - void print_native(outputStream* st) { - st->print(" + %5d ", ticks_in_native); - } -}; - -class ProfilerNode { - private: - ProfilerNode* _next; - public: - tick_counter ticks; - - public: - - void* operator new(size_t size, ThreadProfiler* tp) throw(); - void operator delete(void* p); - - ProfilerNode() { - _next = NULL; - } - - virtual ~ProfilerNode() { - if (_next) - delete _next; - } - - void set_next(ProfilerNode* n) { _next = n; } - ProfilerNode* next() { return _next; } - - void update(TickPosition where) { ticks.update(where);} - int total_ticks() { return ticks.total(); } - - virtual bool is_interpreted() const { return false; } - virtual bool is_compiled() const { return false; } - virtual bool is_stub() const { return false; } - virtual bool is_runtime_stub() const{ return false; } - virtual void oops_do(OopClosure* f) = 0; - - virtual bool interpreted_match(Method* m) const { return false; } - virtual bool compiled_match(Method* m ) const { return false; } - virtual bool stub_match(Method* m, const char* name) const { return false; } - virtual bool adapter_match() const { return false; } - virtual bool runtimeStub_match(const CodeBlob* stub, const char* name) const { return false; } - virtual bool unknown_compiled_match(const CodeBlob* cb) const { return false; } - - static void print_title(outputStream* st) { - st->print(" + native"); - st->fill_to(col3); - st->print("Method"); - st->fill_to(col4); - st->cr(); - } - - static void print_total(outputStream* st, tick_counter* t, int total, const char* msg) { - t->print_code(st, total); - st->fill_to(col2); - t->print_native(st); - st->fill_to(col3); - st->print("%s", msg); - st->cr(); - } - - virtual Method* method() = 0; - - virtual void print_method_on(outputStream* st) { - int limit; - int i; - Method* m = method(); - Symbol* k = m->klass_name(); - // Print the class name with dots instead of slashes - limit = k->utf8_length(); - for (i = 0 ; i < limit ; i += 1) { - char c = (char) k->byte_at(i); - if (c == '/') { - c = '.'; - } - st->print("%c", c); - } - if (limit > 0) { - st->print("."); - } - Symbol* n = m->name(); - limit = n->utf8_length(); - for (i = 0 ; i < limit ; i += 1) { - char c = (char) n->byte_at(i); - st->print("%c", c); - } - if (Verbose || WizardMode) { - // Disambiguate overloaded methods - Symbol* sig = m->signature(); - sig->print_symbol_on(st); - } else if (MethodHandles::is_signature_polymorphic(m->intrinsic_id())) - // compare with Method::print_short_name - MethodHandles::print_as_basic_type_signature_on(st, m->signature(), true); - } - - virtual void print(outputStream* st, int total_ticks) { - ticks.print_code(st, total_ticks); - st->fill_to(col2); - ticks.print_native(st); - st->fill_to(col3); - print_method_on(st); - st->cr(); - } - - // for hashing into the table - static 
int hash(Method* method) { - // The point here is to try to make something fairly unique - // out of the fields we can read without grabbing any locks - // since the method may be locked when we need the hash. - return ( - method->code_size() ^ - method->max_stack() ^ - method->max_locals() ^ - method->size_of_parameters()); - } - - // for sorting - static int compare(ProfilerNode** a, ProfilerNode** b) { - return (*b)->total_ticks() - (*a)->total_ticks(); - } -}; - -void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() { - void* result = (void*) tp->area_top; - tp->area_top += size; - - if (tp->area_top > tp->area_limit) { - fatal("flat profiler buffer overflow"); - } - return result; -} - -void ProfilerNode::operator delete(void* p){ -} - -class interpretedNode : public ProfilerNode { - private: - Method* _method; - oop _class_loader; // needed to keep metadata for the method alive - public: - interpretedNode(Method* method, TickPosition where) : ProfilerNode() { - _method = method; - _class_loader = method->method_holder()->class_loader(); - update(where); - } - - bool is_interpreted() const { return true; } - - bool interpreted_match(Method* m) const { - return _method == m; - } - - void oops_do(OopClosure* f) { - f->do_oop(&_class_loader); - } - - Method* method() { return _method; } - - static void print_title(outputStream* st) { - st->fill_to(col1); - st->print("%11s", "Interpreted"); - ProfilerNode::print_title(st); - } - - void print(outputStream* st, int total_ticks) { - ProfilerNode::print(st, total_ticks); - } - - void print_method_on(outputStream* st) { - ProfilerNode::print_method_on(st); - MethodCounters* mcs = method()->method_counters(); - if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short(); - } -}; - -class compiledNode : public ProfilerNode { - private: - Method* _method; - oop _class_loader; // needed to keep metadata for the method alive - public: - compiledNode(Method* method, TickPosition where) : ProfilerNode() { - _method = method; - _class_loader = method->method_holder()->class_loader(); - update(where); - } - bool is_compiled() const { return true; } - - bool compiled_match(Method* m) const { - return _method == m; - } - - Method* method() { return _method; } - - void oops_do(OopClosure* f) { - f->do_oop(&_class_loader); - } - - static void print_title(outputStream* st) { - st->fill_to(col1); - st->print("%11s", "Compiled"); - ProfilerNode::print_title(st); - } - - void print(outputStream* st, int total_ticks) { - ProfilerNode::print(st, total_ticks); - } - - void print_method_on(outputStream* st) { - ProfilerNode::print_method_on(st); - } -}; - -class stubNode : public ProfilerNode { - private: - Method* _method; - oop _class_loader; // needed to keep metadata for the method alive - const char* _symbol; // The name of the nearest VM symbol (for +ProfileVM). 
Points to a unique string - public: - stubNode(Method* method, const char* name, TickPosition where) : ProfilerNode() { - _method = method; - _class_loader = method->method_holder()->class_loader(); - _symbol = name; - update(where); - } - - bool is_stub() const { return true; } - - void oops_do(OopClosure* f) { - f->do_oop(&_class_loader); - } - - bool stub_match(Method* m, const char* name) const { - return (_method == m) && (_symbol == name); - } - - Method* method() { return _method; } - - static void print_title(outputStream* st) { - st->fill_to(col1); - st->print("%11s", "Stub"); - ProfilerNode::print_title(st); - } - - void print(outputStream* st, int total_ticks) { - ProfilerNode::print(st, total_ticks); - } - - void print_method_on(outputStream* st) { - ProfilerNode::print_method_on(st); - print_symbol_on(st); - } - - void print_symbol_on(outputStream* st) { - if(_symbol) { - st->print(" (%s)", _symbol); - } - } -}; - -class adapterNode : public ProfilerNode { - public: - adapterNode(TickPosition where) : ProfilerNode() { - update(where); - } - bool is_compiled() const { return true; } - - bool adapter_match() const { return true; } - - Method* method() { return NULL; } - - void oops_do(OopClosure* f) { - ; - } - - void print(outputStream* st, int total_ticks) { - ProfilerNode::print(st, total_ticks); - } - - void print_method_on(outputStream* st) { - st->print("%s", "adapters"); - } -}; - -class runtimeStubNode : public ProfilerNode { - private: - const RuntimeStub* _stub; - const char* _symbol; // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string. - public: - runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(NULL), _symbol(name) { - assert(stub->is_runtime_stub(), "wrong code blob"); - _stub = (RuntimeStub*) stub; - update(where); - } - - bool is_runtime_stub() const { return true; } - - bool runtimeStub_match(const CodeBlob* stub, const char* name) const { - assert(stub->is_runtime_stub(), "wrong code blob"); - return _stub->entry_point() == ((RuntimeStub*)stub)->entry_point() && - (_symbol == name); - } - - Method* method() { return NULL; } - - static void print_title(outputStream* st) { - st->fill_to(col1); - st->print("%11s", "Runtime stub"); - ProfilerNode::print_title(st); - } - - void oops_do(OopClosure* f) { - ; - } - - void print(outputStream* st, int total_ticks) { - ProfilerNode::print(st, total_ticks); - } - - void print_method_on(outputStream* st) { - st->print("%s", _stub->name()); - print_symbol_on(st); - } - - void print_symbol_on(outputStream* st) { - if(_symbol) { - st->print(" (%s)", _symbol); - } - } -}; - - -class unknown_compiledNode : public ProfilerNode { - const char *_name; - public: - unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() { - if ( cb->is_buffer_blob() ) - _name = ((const BufferBlob*)cb)->name(); - else - _name = ((const SingletonBlob*)cb)->name(); - update(where); - } - bool is_compiled() const { return true; } - - bool unknown_compiled_match(const CodeBlob* cb) const { - if ( cb->is_buffer_blob() ) - return !strcmp(((const BufferBlob*)cb)->name(), _name); - else - return !strcmp(((const SingletonBlob*)cb)->name(), _name); - } - - Method* method() { return NULL; } - - void oops_do(OopClosure* f) { - ; - } - - void print(outputStream* st, int total_ticks) { - ProfilerNode::print(st, total_ticks); - } - - void print_method_on(outputStream* st) { - st->print("%s", _name); - } -}; - -class vmNode : public ProfilerNode { - private: - 
const char* _name; // "optional" name obtained by os means such as dll lookup - public: - vmNode(const TickPosition where) : ProfilerNode() { - _name = NULL; - update(where); - } - - vmNode(const char* name, const TickPosition where) : ProfilerNode() { - _name = os::strdup(name); - update(where); - } - - ~vmNode() { - if (_name != NULL) { - os::free((void*)_name); - } - } - - const char *name() const { return _name; } - bool is_compiled() const { return true; } - - bool vm_match(const char* name) const { return strcmp(name, _name) == 0; } - - Method* method() { return NULL; } - - static int hash(const char* name){ - // Compute a simple hash - const char* cp = name; - int h = 0; - - if(name != NULL){ - while(*cp != '\0'){ - h = (h << 1) ^ *cp; - cp++; - } - } - return h; - } - - void oops_do(OopClosure* f) { - ; - } - - void print(outputStream* st, int total_ticks) { - ProfilerNode::print(st, total_ticks); - } - - void print_method_on(outputStream* st) { - if(_name==NULL){ - st->print("%s", "unknown code"); - } - else { - st->print("%s", _name); - } - } -}; - -void ThreadProfiler::interpreted_update(Method* method, TickPosition where) { - int index = entry(ProfilerNode::hash(method)); - if (!table[index]) { - table[index] = new (this) interpretedNode(method, where); - } else { - ProfilerNode* prev = table[index]; - for(ProfilerNode* node = prev; node; node = node->next()) { - if (node->interpreted_match(method)) { - node->update(where); - return; - } - prev = node; - } - prev->set_next(new (this) interpretedNode(method, where)); - } -} - -void ThreadProfiler::compiled_update(Method* method, TickPosition where) { - int index = entry(ProfilerNode::hash(method)); - if (!table[index]) { - table[index] = new (this) compiledNode(method, where); - } else { - ProfilerNode* prev = table[index]; - for(ProfilerNode* node = prev; node; node = node->next()) { - if (node->compiled_match(method)) { - node->update(where); - return; - } - prev = node; - } - prev->set_next(new (this) compiledNode(method, where)); - } -} - -void ThreadProfiler::stub_update(Method* method, const char* name, TickPosition where) { - int index = entry(ProfilerNode::hash(method)); - if (!table[index]) { - table[index] = new (this) stubNode(method, name, where); - } else { - ProfilerNode* prev = table[index]; - for(ProfilerNode* node = prev; node; node = node->next()) { - if (node->stub_match(method, name)) { - node->update(where); - return; - } - prev = node; - } - prev->set_next(new (this) stubNode(method, name, where)); - } -} - -void ThreadProfiler::adapter_update(TickPosition where) { - int index = 0; - if (!table[index]) { - table[index] = new (this) adapterNode(where); - } else { - ProfilerNode* prev = table[index]; - for(ProfilerNode* node = prev; node; node = node->next()) { - if (node->adapter_match()) { - node->update(where); - return; - } - prev = node; - } - prev->set_next(new (this) adapterNode(where)); - } -} - -void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) { - int index = 0; - if (!table[index]) { - table[index] = new (this) runtimeStubNode(stub, name, where); - } else { - ProfilerNode* prev = table[index]; - for(ProfilerNode* node = prev; node; node = node->next()) { - if (node->runtimeStub_match(stub, name)) { - node->update(where); - return; - } - prev = node; - } - prev->set_next(new (this) runtimeStubNode(stub, name, where)); - } -} - - -void ThreadProfiler::unknown_compiled_update(const CodeBlob* cb, TickPosition where) { - int index = 0; - if 
(!table[index]) { - table[index] = new (this) unknown_compiledNode(cb, where); - } else { - ProfilerNode* prev = table[index]; - for(ProfilerNode* node = prev; node; node = node->next()) { - if (node->unknown_compiled_match(cb)) { - node->update(where); - return; - } - prev = node; - } - prev->set_next(new (this) unknown_compiledNode(cb, where)); - } -} - -void ThreadProfiler::vm_update(TickPosition where) { - vm_update(NULL, where); -} - -void ThreadProfiler::vm_update(const char* name, TickPosition where) { - int index = entry(vmNode::hash(name)); - assert(index >= 0, "Must be positive"); - // Note that we call strdup below since the symbol may be resource allocated - if (!table[index]) { - table[index] = new (this) vmNode(name, where); - } else { - ProfilerNode* prev = table[index]; - for(ProfilerNode* node = prev; node; node = node->next()) { - if (((vmNode *)node)->vm_match(name)) { - node->update(where); - return; - } - prev = node; - } - prev->set_next(new (this) vmNode(name, where)); - } -} - - -class FlatProfilerTask : public PeriodicTask { -public: - FlatProfilerTask(int interval_time) : PeriodicTask(interval_time) {} - void task(); -}; - -void FlatProfiler::record_vm_operation() { - if (Universe::heap()->is_gc_active()) { - FlatProfiler::received_gc_ticks += 1; - return; - } - - if (DeoptimizationMarker::is_active()) { - FlatProfiler::deopt_ticks += 1; - return; - } - - FlatProfiler::vm_operation_ticks += 1; -} - -void FlatProfiler::record_vm_tick() { - // Profile the VM Thread itself if needed - // This is done without getting the Threads_lock and we can go deep - // inside Safepoint, etc. - if( ProfileVM ) { - ResourceMark rm; - ExtendedPC epc; - const char *name = NULL; - char buf[256]; - buf[0] = '\0'; - - vm_thread_profiler->inc_thread_ticks(); - - // Get a snapshot of a current VMThread pc (and leave it running!) - // The call may fail in some circumstances - epc = os::get_thread_pc(VMThread::vm_thread()); - if(epc.pc() != NULL) { - if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) { - name = buf; - } - } - if (name != NULL) { - vm_thread_profiler->vm_update(name, tp_native); - } - } -} - -void FlatProfiler::record_thread_ticks() { - - int maxthreads, suspendedthreadcount; - JavaThread** threadsList; - bool interval_expired = false; - - if (ProfileIntervals && - (FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) { - interval_expired = true; - interval_ticks_previous = FlatProfiler::received_ticks; - } - - // Try not to wait for the Threads_lock - if (Threads_lock->try_lock()) { - { // Threads_lock scope - maxthreads = Threads::number_of_threads(); - threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads, mtInternal); - suspendedthreadcount = 0; - for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) { - if (tp->is_Compiler_thread()) { - // Only record ticks for active compiler threads - CompilerThread* cthread = (CompilerThread*)tp; - if (cthread->task() != NULL) { - // The compiler is active. If we need to access any of the fields - // of the compiler task we should suspend the CompilerThread first. 
- FlatProfiler::compiler_ticks += 1; - continue; - } - } - - // First externally suspend all threads by marking each for - // external suspension - so it will stop at its next transition - // Then do a safepoint - ThreadProfiler* pp = tp->get_thread_profiler(); - if (pp != NULL && pp->engaged) { - MutexLockerEx ml(tp->SR_lock(), Mutex::_no_safepoint_check_flag); - if (!tp->is_external_suspend() && !tp->is_exiting()) { - tp->set_external_suspend(); - threadsList[suspendedthreadcount++] = tp; - } - } - } - Threads_lock->unlock(); - } - // Suspend each thread. This call should just return - // for any threads that have already self-suspended - // Net result should be one safepoint - for (int j = 0; j < suspendedthreadcount; j++) { - JavaThread *tp = threadsList[j]; - if (tp) { - tp->java_suspend(); - } - } - - // We are responsible for resuming any thread on this list - for (int i = 0; i < suspendedthreadcount; i++) { - JavaThread *tp = threadsList[i]; - if (tp) { - ThreadProfiler* pp = tp->get_thread_profiler(); - if (pp != NULL && pp->engaged) { - HandleMark hm; - FlatProfiler::delivered_ticks += 1; - if (interval_expired) { - FlatProfiler::interval_record_thread(pp); - } - // This is the place where we check to see if a user thread is - // blocked waiting for compilation. - if (tp->blocked_on_compilation()) { - pp->compiler_ticks += 1; - pp->interval_data_ref()->inc_compiling(); - } else { - pp->record_tick(tp); - } - } - MutexLocker ml(Threads_lock); - tp->java_resume(); - } - } - if (interval_expired) { - FlatProfiler::interval_print(); - FlatProfiler::interval_reset(); - } - - FREE_C_HEAP_ARRAY(JavaThread *, threadsList); - } else { - // Couldn't get the threads lock, just record that rather than blocking - FlatProfiler::threads_lock_ticks += 1; - } - -} - -void FlatProfilerTask::task() { - FlatProfiler::received_ticks += 1; - - if (ProfileVM) { - FlatProfiler::record_vm_tick(); - } - - VM_Operation* op = VMThread::vm_operation(); - if (op != NULL) { - FlatProfiler::record_vm_operation(); - if (SafepointSynchronize::is_at_safepoint()) { - return; - } - } - FlatProfiler::record_thread_ticks(); -} - -void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) { - FlatProfiler::all_int_ticks++; - if (!FlatProfiler::full_profile()) { - return; - } - - if (!fr.is_interpreted_frame_valid(thread)) { - // tick came at a bad time - interpreter_ticks += 1; - FlatProfiler::interpreter_ticks += 1; - return; - } - - // The frame has been fully validated so we can trust the method and bci - - Method* method = *fr.interpreter_frame_method_addr(); - - interpreted_update(method, where); - - // update byte code table - InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc()); - if (desc != NULL && desc->bytecode() >= 0) { - ticks[desc->bytecode()]++; - } -} - -void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosition where) { - const char *name = NULL; - TickPosition localwhere = where; - - FlatProfiler::all_comp_ticks++; - if (!FlatProfiler::full_profile()) return; - - CodeBlob* cb = fr.cb(); - - // For runtime stubs, record as native rather than as compiled - if (cb->is_runtime_stub()) { - RegisterMap map(thread, false); - fr = fr.sender(&map); - cb = fr.cb(); - localwhere = tp_native; - } - - Method* method = cb->is_compiled() ? 
cb->as_compiled_method()->method() : (Method*) NULL; - if (method == NULL) { - if (cb->is_runtime_stub()) - runtime_stub_update(cb, name, localwhere); - else - unknown_compiled_update(cb, localwhere); - } - else { - if (method->is_native()) { - stub_update(method, name, localwhere); - } else { - compiled_update(method, localwhere); - } - } -} - -extern "C" void find(int x); - - -void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) { - // The tick happened in real code -> non VM code - if (fr.is_interpreted_frame()) { - interval_data_ref()->inc_interpreted(); - record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks); - return; - } - - if (CodeCache::contains(fr.pc())) { - interval_data_ref()->inc_compiled(); - PCRecorder::record(fr.pc()); - record_compiled_tick(thread, fr, tp_code); - return; - } - - if (VtableStubs::stub_containing(fr.pc()) != NULL) { - unknown_ticks_array[ut_vtable_stubs] += 1; - return; - } - - frame caller = fr.profile_find_Java_sender_frame(thread); - - if (caller.sp() != NULL && caller.pc() != NULL) { - record_tick_for_calling_frame(thread, caller); - return; - } - - unknown_ticks_array[ut_running_frame] += 1; - FlatProfiler::unknown_ticks += 1; -} - -void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) { - // The tick happened in VM code - interval_data_ref()->inc_native(); - if (fr.is_interpreted_frame()) { - record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub); - return; - } - if (CodeCache::contains(fr.pc())) { - record_compiled_tick(thread, fr, tp_native); - return; - } - - frame caller = fr.profile_find_Java_sender_frame(thread); - - if (caller.sp() != NULL && caller.pc() != NULL) { - record_tick_for_calling_frame(thread, caller); - return; - } - - unknown_ticks_array[ut_calling_frame] += 1; - FlatProfiler::unknown_ticks += 1; -} - -void ThreadProfiler::record_tick(JavaThread* thread) { - FlatProfiler::all_ticks++; - thread_ticks += 1; - - // Here's another way to track global state changes. - // When the class loader starts it marks the ThreadProfiler to tell it it is in the class loader - // and we check that here. - // This is more direct, and more than one thread can be in the class loader at a time, - // but it does mean the class loader has to know about the profiler. 
- if (region_flag[ThreadProfilerMark::classLoaderRegion]) { - class_loader_ticks += 1; - FlatProfiler::class_loader_ticks += 1; - return; - } else if (region_flag[ThreadProfilerMark::extraRegion]) { - extra_ticks += 1; - FlatProfiler::extra_ticks += 1; - return; - } - // Note that the WatcherThread can now stop for safepoints - uint32_t debug_bits = 0; - if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount, - SuspendRetryDelay, &debug_bits)) { - unknown_ticks_array[ut_unknown_thread_state] += 1; - FlatProfiler::unknown_ticks += 1; - return; - } - - frame fr; - - switch (thread->thread_state()) { - case _thread_in_native: - case _thread_in_native_trans: - case _thread_in_vm: - case _thread_in_vm_trans: - if (thread->profile_last_Java_frame(&fr)) { - if (fr.is_runtime_frame()) { - RegisterMap map(thread, false); - fr = fr.sender(&map); - } - record_tick_for_calling_frame(thread, fr); - } else { - unknown_ticks_array[ut_no_last_Java_frame] += 1; - FlatProfiler::unknown_ticks += 1; - } - break; - // handle_special_runtime_exit_condition self-suspends threads in Java - case _thread_in_Java: - case _thread_in_Java_trans: - if (thread->profile_last_Java_frame(&fr)) { - if (fr.is_safepoint_blob_frame()) { - RegisterMap map(thread, false); - fr = fr.sender(&map); - } - record_tick_for_running_frame(thread, fr); - } else { - unknown_ticks_array[ut_no_last_Java_frame] += 1; - FlatProfiler::unknown_ticks += 1; - } - break; - case _thread_blocked: - case _thread_blocked_trans: - if (thread->osthread() && thread->osthread()->get_state() == RUNNABLE) { - if (thread->profile_last_Java_frame(&fr)) { - if (fr.is_safepoint_blob_frame()) { - RegisterMap map(thread, false); - fr = fr.sender(&map); - record_tick_for_running_frame(thread, fr); - } else { - record_tick_for_calling_frame(thread, fr); - } - } else { - unknown_ticks_array[ut_no_last_Java_frame] += 1; - FlatProfiler::unknown_ticks += 1; - } - } else { - blocked_ticks += 1; - FlatProfiler::blocked_ticks += 1; - } - break; - case _thread_uninitialized: - case _thread_new: - // not used, included for completeness - case _thread_new_trans: - unknown_ticks_array[ut_no_last_Java_frame] += 1; - FlatProfiler::unknown_ticks += 1; - break; - default: - unknown_ticks_array[ut_unknown_thread_state] += 1; - FlatProfiler::unknown_ticks += 1; - break; - } - return; -} - -void ThreadProfiler::engage() { - engaged = true; - timer.start(); -} - -void ThreadProfiler::disengage() { - engaged = false; - timer.stop(); -} - -void ThreadProfiler::initialize() { - for (int index = 0; index < table_size; index++) { - table[index] = NULL; - } - thread_ticks = 0; - blocked_ticks = 0; - compiler_ticks = 0; - interpreter_ticks = 0; - for (int ut = 0; ut < ut_end; ut += 1) { - unknown_ticks_array[ut] = 0; - } - region_flag[ThreadProfilerMark::classLoaderRegion] = false; - class_loader_ticks = 0; - region_flag[ThreadProfilerMark::extraRegion] = false; - extra_ticks = 0; - timer.start(); - interval_data_ref()->reset(); -} - -void ThreadProfiler::reset() { - timer.stop(); - if (table != NULL) { - for (int index = 0; index < table_size; index++) { - ProfilerNode* n = table[index]; - if (n != NULL) { - delete n; - } - } - } - initialize(); -} - -void FlatProfiler::allocate_table() { - { // Bytecode table - bytecode_ticks = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal); - bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal); - for(int index = 0; index < Bytecodes::number_of_codes; index++) { - bytecode_ticks[index] = 0; - 
bytecode_ticks_stub[index] = 0; - } - } - - if (ProfilerRecordPC) PCRecorder::init(); - - interval_data = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size, mtInternal); - FlatProfiler::interval_reset(); -} - -void FlatProfiler::engage(JavaThread* mainThread, bool fullProfile) { - full_profile_flag = fullProfile; - if (bytecode_ticks == NULL) { - allocate_table(); - } - if(ProfileVM && (vm_thread_profiler == NULL)){ - vm_thread_profiler = new ThreadProfiler(); - } - if (task == NULL) { - task = new FlatProfilerTask(WatcherThread::delay_interval); - task->enroll(); - } - timer.start(); - if (mainThread != NULL) { - // When mainThread was created, it might not have a ThreadProfiler - ThreadProfiler* pp = mainThread->get_thread_profiler(); - if (pp == NULL) { - mainThread->set_thread_profiler(new ThreadProfiler()); - } else { - pp->reset(); - } - mainThread->get_thread_profiler()->engage(); - } - // This is where we would assign thread_profiler - // if we wanted only one thread_profiler for all threads. - thread_profiler = NULL; -} - -void FlatProfiler::disengage() { - if (!task) { - return; - } - timer.stop(); - task->disenroll(); - delete task; - task = NULL; - if (thread_profiler != NULL) { - thread_profiler->disengage(); - } else { - MutexLocker tl(Threads_lock); - for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) { - ThreadProfiler* pp = tp->get_thread_profiler(); - if (pp != NULL) { - pp->disengage(); - } - } - } -} - -void FlatProfiler::reset() { - if (task) { - disengage(); - } - - class_loader_ticks = 0; - extra_ticks = 0; - received_gc_ticks = 0; - vm_operation_ticks = 0; - compiler_ticks = 0; - deopt_ticks = 0; - interpreter_ticks = 0; - blocked_ticks = 0; - unknown_ticks = 0; - received_ticks = 0; - delivered_ticks = 0; - timer.stop(); -} - -bool FlatProfiler::is_active() { - return task != NULL; -} - -void FlatProfiler::print_byte_code_statistics() { - GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200); - - tty->print_cr(" Bytecode ticks:"); - for (int index = 0; index < Bytecodes::number_of_codes; index++) { - if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) { - tty->print_cr(" %4d %4d = %s", - FlatProfiler::bytecode_ticks[index], - FlatProfiler::bytecode_ticks_stub[index], - Bytecodes::name( (Bytecodes::Code) index)); - } - } - tty->cr(); -} - -void print_ticks(const char* title, int ticks, int total) { - if (ticks > 0) { - tty->print("%5.1f%% %5d", ticks * 100.0 / total, ticks); - tty->fill_to(col3); - tty->print("%s", title); - tty->cr(); - } -} - -void ThreadProfiler::print(const char* thread_name) { - ResourceMark rm; - MutexLocker ppl(ProfilePrint_lock); - int index = 0; // Declared outside for loops for portability - - if (table == NULL) { - return; - } - - if (thread_ticks <= 0) { - return; - } - - const char* title = "too soon to tell"; - double secs = timer.seconds(); - - GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200); - for(index = 0; index < table_size; index++) { - for(ProfilerNode* node = table[index]; node; node = node->next()) - array->append(node); - } - - array->sort(&ProfilerNode::compare); - - // compute total (sanity check) - int active = - class_loader_ticks + - compiler_ticks + - interpreter_ticks + - unknown_ticks(); - for (index = 0; index < array->length(); index++) { - active += array->at(index)->ticks.total(); - } - int total = active + blocked_ticks; - - tty->cr(); - tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", 
secs, total, thread_name); - if (total != thread_ticks) { - print_ticks("Lost ticks", thread_ticks-total, thread_ticks); - } - tty->cr(); - - // print interpreted methods - tick_counter interpreted_ticks; - bool has_interpreted_ticks = false; - int print_count = 0; - for (index = 0; index < array->length(); index++) { - ProfilerNode* n = array->at(index); - if (n->is_interpreted()) { - interpreted_ticks.add(&n->ticks); - if (!has_interpreted_ticks) { - interpretedNode::print_title(tty); - has_interpreted_ticks = true; - } - if (print_count++ < ProfilerNumberOfInterpretedMethods) { - n->print(tty, active); - } - } - } - if (has_interpreted_ticks) { - if (print_count <= ProfilerNumberOfInterpretedMethods) { - title = "Total interpreted"; - } else { - title = "Total interpreted (including elided)"; - } - interpretedNode::print_total(tty, &interpreted_ticks, active, title); - tty->cr(); - } - - // print compiled methods - tick_counter compiled_ticks; - bool has_compiled_ticks = false; - print_count = 0; - for (index = 0; index < array->length(); index++) { - ProfilerNode* n = array->at(index); - if (n->is_compiled()) { - compiled_ticks.add(&n->ticks); - if (!has_compiled_ticks) { - compiledNode::print_title(tty); - has_compiled_ticks = true; - } - if (print_count++ < ProfilerNumberOfCompiledMethods) { - n->print(tty, active); - } - } - } - if (has_compiled_ticks) { - if (print_count <= ProfilerNumberOfCompiledMethods) { - title = "Total compiled"; - } else { - title = "Total compiled (including elided)"; - } - compiledNode::print_total(tty, &compiled_ticks, active, title); - tty->cr(); - } - - // print stub methods - tick_counter stub_ticks; - bool has_stub_ticks = false; - print_count = 0; - for (index = 0; index < array->length(); index++) { - ProfilerNode* n = array->at(index); - if (n->is_stub()) { - stub_ticks.add(&n->ticks); - if (!has_stub_ticks) { - stubNode::print_title(tty); - has_stub_ticks = true; - } - if (print_count++ < ProfilerNumberOfStubMethods) { - n->print(tty, active); - } - } - } - if (has_stub_ticks) { - if (print_count <= ProfilerNumberOfStubMethods) { - title = "Total stub"; - } else { - title = "Total stub (including elided)"; - } - stubNode::print_total(tty, &stub_ticks, active, title); - tty->cr(); - } - - // print runtime stubs - tick_counter runtime_stub_ticks; - bool has_runtime_stub_ticks = false; - print_count = 0; - for (index = 0; index < array->length(); index++) { - ProfilerNode* n = array->at(index); - if (n->is_runtime_stub()) { - runtime_stub_ticks.add(&n->ticks); - if (!has_runtime_stub_ticks) { - runtimeStubNode::print_title(tty); - has_runtime_stub_ticks = true; - } - if (print_count++ < ProfilerNumberOfRuntimeStubNodes) { - n->print(tty, active); - } - } - } - if (has_runtime_stub_ticks) { - if (print_count <= ProfilerNumberOfRuntimeStubNodes) { - title = "Total runtime stubs"; - } else { - title = "Total runtime stubs (including elided)"; - } - runtimeStubNode::print_total(tty, &runtime_stub_ticks, active, title); - tty->cr(); - } - - if (blocked_ticks + class_loader_ticks + interpreter_ticks + compiler_ticks + unknown_ticks() != 0) { - tty->fill_to(col1); - tty->print_cr("Thread-local ticks:"); - print_ticks("Blocked (of total)", blocked_ticks, total); - print_ticks("Class loader", class_loader_ticks, active); - print_ticks("Extra", extra_ticks, active); - print_ticks("Interpreter", interpreter_ticks, active); - print_ticks("Compilation", compiler_ticks, active); - print_ticks("Unknown: vtable stubs", unknown_ticks_array[ut_vtable_stubs], active); 
- print_ticks("Unknown: null method", unknown_ticks_array[ut_null_method], active); - print_ticks("Unknown: running frame", unknown_ticks_array[ut_running_frame], active); - print_ticks("Unknown: calling frame", unknown_ticks_array[ut_calling_frame], active); - print_ticks("Unknown: no pc", unknown_ticks_array[ut_no_pc], active); - print_ticks("Unknown: no last frame", unknown_ticks_array[ut_no_last_Java_frame], active); - print_ticks("Unknown: thread_state", unknown_ticks_array[ut_unknown_thread_state], active); - tty->cr(); - } - - if (WizardMode) { - tty->print_cr("Node area used: " INTX_FORMAT " Kb", (area_top - area_bottom) / 1024); - } - reset(); -} - -/* -ThreadProfiler::print_unknown(){ - if (table == NULL) { - return; - } - - if (thread_ticks <= 0) { - return; - } -} */ - -void FlatProfiler::print(int unused) { - ResourceMark rm; - if (thread_profiler != NULL) { - thread_profiler->print("All threads"); - } else { - MutexLocker tl(Threads_lock); - for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) { - ThreadProfiler* pp = tp->get_thread_profiler(); - if (pp != NULL) { - pp->print(tp->get_thread_name()); - } - } - } - - if (ProfilerPrintByteCodeStatistics) { - print_byte_code_statistics(); - } - - if (non_method_ticks() > 0) { - tty->cr(); - tty->print_cr("Global summary of %3.2f seconds:", timer.seconds()); - print_ticks("Received ticks", received_ticks, received_ticks); - print_ticks("Received GC ticks", received_gc_ticks, received_ticks); - print_ticks("Compilation", compiler_ticks, received_ticks); - print_ticks("Deoptimization", deopt_ticks, received_ticks); - print_ticks("Other VM operations", vm_operation_ticks, received_ticks); -#ifndef PRODUCT - print_ticks("Blocked ticks", blocked_ticks, received_ticks); - print_ticks("Threads_lock blocks", threads_lock_ticks, received_ticks); - print_ticks("Delivered ticks", delivered_ticks, received_ticks); - print_ticks("All ticks", all_ticks, received_ticks); -#endif - print_ticks("Class loader", class_loader_ticks, received_ticks); - print_ticks("Extra ", extra_ticks, received_ticks); - print_ticks("Interpreter", interpreter_ticks, received_ticks); - print_ticks("Unknown code", unknown_ticks, received_ticks); - } - - PCRecorder::print(); - - if(ProfileVM){ - tty->cr(); - vm_thread_profiler->print("VM Thread"); - } -} - -void IntervalData::print_header(outputStream* st) { - st->print("i/c/n/g"); -} - -void IntervalData::print_data(outputStream* st) { - st->print("%d/%d/%d/%d", interpreted(), compiled(), native(), compiling()); -} - -void FlatProfiler::interval_record_thread(ThreadProfiler* tp) { - IntervalData id = tp->interval_data(); - int total = id.total(); - tp->interval_data_ref()->reset(); - - // Insertion sort the data, if it's relevant. 
- for (int i = 0; i < interval_print_size; i += 1) { - if (total > interval_data[i].total()) { - for (int j = interval_print_size - 1; j > i; j -= 1) { - interval_data[j] = interval_data[j-1]; - } - interval_data[i] = id; - break; - } - } -} - -void FlatProfiler::interval_print() { - if ((interval_data[0].total() > 0)) { - tty->stamp(); - tty->print("\t"); - IntervalData::print_header(tty); - for (int i = 0; i < interval_print_size; i += 1) { - if (interval_data[i].total() > 0) { - tty->print("\t"); - interval_data[i].print_data(tty); - } - } - tty->cr(); - } -} - -void FlatProfiler::interval_reset() { - for (int i = 0; i < interval_print_size; i += 1) { - interval_data[i].reset(); - } -} - -void ThreadProfiler::oops_do(OopClosure* f) { - if (table == NULL) return; - - for(int index = 0; index < table_size; index++) { - for(ProfilerNode* node = table[index]; node; node = node->next()) - node->oops_do(f); - } -} - -void FlatProfiler::oops_do(OopClosure* f) { - if (thread_profiler != NULL) { - thread_profiler->oops_do(f); - } else { - for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) { - ThreadProfiler* pp = tp->get_thread_profiler(); - if (pp != NULL) { - pp->oops_do(f); - } - } - } -}
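The ThreadProfiler::*_update methods removed above all share one shape: hash the sample key into a fixed-size table of singly linked ProfilerNode chains, walk the chain for a matching node and bump its tick count, and otherwise append a fresh node at the end of the chain. The standalone sketch below shows just that insert-or-update step; the names (SampleNode, SampleTable) are illustrative stand-ins rather than HotSpot types, and a plain std::string key with C-heap allocation replaces the Method*/arena machinery of the original.

#include <cstddef>
#include <functional>
#include <string>

// Illustrative stand-ins, not HotSpot types.
struct SampleNode {
  std::string key;        // what the tick is attributed to (method, stub, VM symbol, ...)
  int         ticks;      // how many samples landed on this key
  SampleNode* next;       // next node in this bucket's chain
  explicit SampleNode(const std::string& k) : key(k), ticks(0), next(nullptr) {}
};

class SampleTable {
 public:
  static const std::size_t table_size = 1024;

  SampleTable() {
    for (std::size_t i = 0; i < table_size; i++) table_[i] = nullptr;
  }

  ~SampleTable() {
    for (std::size_t i = 0; i < table_size; i++) {
      SampleNode* n = table_[i];
      while (n != nullptr) { SampleNode* dead = n; n = n->next; delete dead; }
    }
  }

  // Same shape as interpreted_update()/compiled_update()/vm_update() above:
  // bump the matching node if the chain already has one, otherwise append.
  void update(const std::string& key) {
    std::size_t index = std::hash<std::string>()(key) % table_size;
    if (table_[index] == nullptr) {
      table_[index] = new SampleNode(key);
      table_[index]->ticks = 1;
      return;
    }
    SampleNode* prev = table_[index];
    for (SampleNode* node = prev; node != nullptr; node = node->next) {
      if (node->key == key) {
        node->ticks += 1;
        return;
      }
      prev = node;
    }
    prev->next = new SampleNode(key);
    prev->next->ticks = 1;
  }

 private:
  SampleNode* table_[table_size];
};

The removed code additionally carves its nodes out of a preallocated per-thread area via the placement new (this) calls (area_bottom/area_top in the header below), which the sketch does not attempt to model.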
--- a/hotspot/src/share/vm/runtime/fprofiler.hpp Thu Aug 31 17:06:10 2017 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,319 +0,0 @@ -/* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_RUNTIME_FPROFILER_HPP -#define SHARE_VM_RUNTIME_FPROFILER_HPP - -#include "utilities/macros.hpp" -#include "runtime/timer.hpp" - -// a simple flat profiler for Java - - -// Forward declaration of classes defined in this header file -class ThreadProfiler; -class ThreadProfilerMark; -class FlatProfiler; -class IntervalData; - -// Declarations of classes defined only in the implementation. -class ProfilerNode; -class FlatProfilerTask; - -enum TickPosition { - tp_code, - tp_native -}; - -// One of these guys is constructed as we enter interesting regions -// and destructed as we exit the region. While we are in the region -// ticks are allotted to the region. -class ThreadProfilerMark: public StackObj { -public: - // For now, the only thread-specific region is the class loader. 
- enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion }; - - ThreadProfilerMark(Region) NOT_FPROF_RETURN; - ~ThreadProfilerMark() NOT_FPROF_RETURN; - -private: - ThreadProfiler* _pp; - Region _r; -}; - -#if INCLUDE_FPROF - -class IntervalData VALUE_OBJ_CLASS_SPEC { - // Just to keep these things all together -private: - int _interpreted; - int _compiled; - int _native; - int _compiling; -public: - int interpreted() { - return _interpreted; - } - int compiled() { - return _compiled; - } - int native() { - return _native; - } - int compiling() { - return _compiling; - } - int total() { - return (interpreted() + compiled() + native() + compiling()); - } - void inc_interpreted() { - _interpreted += 1; - } - void inc_compiled() { - _compiled += 1; - } - void inc_native() { - _native += 1; - } - void inc_compiling() { - _compiling += 1; - } - void reset() { - _interpreted = 0; - _compiled = 0; - _native = 0; - _compiling = 0; - } - static void print_header(outputStream* st); - void print_data(outputStream* st); -}; -#endif // INCLUDE_FPROF - -class ThreadProfiler: public CHeapObj<mtInternal> { -public: - ThreadProfiler() NOT_FPROF_RETURN; - ~ThreadProfiler() NOT_FPROF_RETURN; - - // Resets the profiler - void reset() NOT_FPROF_RETURN; - - // Activates the profiler for a certain thread - void engage() NOT_FPROF_RETURN; - - // Deactivates the profiler - void disengage() NOT_FPROF_RETURN; - - // Prints the collected profiling information - void print(const char* thread_name) NOT_FPROF_RETURN; - - // Garbage Collection Support - void oops_do(OopClosure* f) NOT_FPROF_RETURN; - -#if INCLUDE_FPROF -private: - // for recording ticks. - friend class ProfilerNode; - char* area_bottom; // preallocated area for pnodes - char* area_top; - char* area_limit; - static int table_size; - ProfilerNode** table; - -private: - void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks); - void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where); - void interpreted_update(Method* method, TickPosition where); - void compiled_update (Method* method, TickPosition where); - void stub_update (Method* method, const char* name, TickPosition where); - void adapter_update (TickPosition where); - - void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where); - void unknown_compiled_update (const CodeBlob* cb, TickPosition where); - - void vm_update (TickPosition where); - void vm_update (const char* name, TickPosition where); - - void record_tick_for_running_frame(JavaThread* thread, frame fr); - void record_tick_for_calling_frame(JavaThread* thread, frame fr); - - void initialize(); - - static int entry(int value); - - -private: - friend class FlatProfiler; - void record_tick(JavaThread* thread); - bool engaged; - // so we can do percentages for this thread, and quick checks for activity - int thread_ticks; - int compiler_ticks; - int interpreter_ticks; - -public: - void inc_thread_ticks() { thread_ticks += 1; } - -private: - friend class ThreadProfilerMark; - // counters for thread-specific regions - bool region_flag[ThreadProfilerMark::maxRegion]; - int class_loader_ticks; - int extra_ticks; - -private: - // other thread-specific regions - int blocked_ticks; - enum UnknownTickSites { - ut_null_method, - ut_vtable_stubs, - ut_running_frame, - ut_calling_frame, - ut_no_pc, - ut_no_last_Java_frame, - ut_unknown_thread_state, - ut_end - }; - int unknown_ticks_array[ut_end]; - int unknown_ticks() { - int result = 0; - for (int ut = 0; ut < 
ut_end; ut += 1) { - result += unknown_ticks_array[ut]; - } - return result; - } - - elapsedTimer timer; - - // For interval timing -private: - IntervalData _interval_data; - IntervalData interval_data() { - return _interval_data; - } - IntervalData* interval_data_ref() { - return &_interval_data; - } -#endif // INCLUDE_FPROF -}; - -class FlatProfiler: AllStatic { -public: - static void reset() NOT_FPROF_RETURN ; - static void engage(JavaThread* mainThread, bool fullProfile) NOT_FPROF_RETURN ; - static void disengage() NOT_FPROF_RETURN ; - static void print(int unused) NOT_FPROF_RETURN ; - static bool is_active() NOT_FPROF_RETURN_(false) ; - - // This is NULL if each thread has its own thread profiler, - // else this is the single thread profiler used by all threads. - // In particular it makes a difference during garbage collection, - // where you only want to traverse each thread profiler once. - static ThreadProfiler* get_thread_profiler() NOT_FPROF_RETURN_(NULL); - - // Garbage Collection Support - static void oops_do(OopClosure* f) NOT_FPROF_RETURN ; - - // Support for disassembler to inspect the PCRecorder - - // Returns the start address for a given pc - // NULL is returned if the PCRecorder is inactive - static address bucket_start_for(address pc) NOT_FPROF_RETURN_(NULL); - - enum { MillisecsPerTick = 10 }; // ms per profiling ticks - - // Returns the number of ticks recorded for the bucket - // pc belongs to. - static int bucket_count_for(address pc) NOT_FPROF_RETURN_(0); - -#if INCLUDE_FPROF - - private: - static bool full_profile() { - return full_profile_flag; - } - - friend class ThreadProfiler; - // the following group of ticks cover everything that's not attributed to individual Java methods - static int received_gc_ticks; // ticks during which gc was active - static int vm_operation_ticks; // total ticks in vm_operations other than GC - static int threads_lock_ticks; // the number of times we couldn't get the Threads_lock without blocking - static int blocked_ticks; // ticks when the thread was blocked. - static int class_loader_ticks; // total ticks in class loader - static int extra_ticks; // total ticks an extra temporary measuring - static int compiler_ticks; // total ticks in compilation - static int interpreter_ticks; // ticks in unknown interpreted method - static int deopt_ticks; // ticks in deoptimization - static int unknown_ticks; // ticks that cannot be categorized - static int received_ticks; // ticks that were received by task - static int delivered_ticks; // ticks that were delivered by task - static int non_method_ticks() { - return - ( received_gc_ticks - + vm_operation_ticks - + deopt_ticks - + threads_lock_ticks - + blocked_ticks - + compiler_ticks - + interpreter_ticks - + unknown_ticks ); - } - static elapsedTimer timer; - - // Counts of each of the byte codes - static int* bytecode_ticks; - static int* bytecode_ticks_stub; - static void print_byte_code_statistics(); - - // the ticks below are for continuous profiling (to adjust recompilation, etc.) - static int all_ticks; // total count of ticks received so far - static int all_int_ticks; // ticks in interpreter - static int all_comp_ticks; // ticks in compiled code (+ native) - static bool full_profile_flag; // collecting full profile? - - // to accumulate thread-specific data - // if we aren't profiling individual threads. - static ThreadProfiler* thread_profiler; - static ThreadProfiler* vm_thread_profiler; - - static void allocate_table(); - - // The task that periodically interrupts things. 
- friend class FlatProfilerTask; - static FlatProfilerTask* task; - static void record_vm_operation(); - static void record_vm_tick(); - static void record_thread_ticks(); - - // For interval analysis - private: - static int interval_ticks_previous; // delivered_ticks from the last interval - static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler. - static void interval_print(); // print interval data. - static void interval_reset(); // reset interval data. - enum {interval_print_size = 10}; - static IntervalData* interval_data; -#endif // INCLUDE_FPROF -}; - -#endif // SHARE_VM_RUNTIME_FPROFILER_HPP
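The IntervalData class and the interval_* routines deleted above implement a small "busiest intervals" buffer: each delivered tick is classified as interpreted, compiled, native or compiling, and when an interval expires the per-thread snapshot is insertion-sorted into a ten-slot array so that interval_print emits the largest totals first. Below is a minimal self-contained sketch of that buffer, under simplified assumptions: IntervalCounts is an illustrative name, and the native counter is renamed native_ticks purely for clarity.

#include <cstdio>

// Simplified stand-in for IntervalData: four per-interval tick counters.
struct IntervalCounts {
  int interpreted = 0, compiled = 0, native_ticks = 0, compiling = 0;
  int total() const { return interpreted + compiled + native_ticks + compiling; }
  void reset() { interpreted = compiled = native_ticks = compiling = 0; }
};

const int interval_print_size = 10;        // same bound as in the removed code
IntervalCounts interval_data[interval_print_size];

// Mirrors FlatProfiler::interval_record_thread: insertion-sort the snapshot
// into the fixed-size buffer so the largest totals stay at the front.
void interval_record(const IntervalCounts& snapshot) {
  int total = snapshot.total();
  for (int i = 0; i < interval_print_size; i += 1) {
    if (total > interval_data[i].total()) {
      // shift the tail down one slot, dropping the smallest entry
      for (int j = interval_print_size - 1; j > i; j -= 1) {
        interval_data[j] = interval_data[j - 1];
      }
      interval_data[i] = snapshot;
      break;
    }
  }
}

// Mirrors FlatProfiler::interval_print: emit only the non-empty slots,
// in the same i/c/n/g order as IntervalData::print_data.
void interval_print() {
  for (int i = 0; i < interval_print_size; i += 1) {
    if (interval_data[i].total() > 0) {
      std::printf("%d/%d/%d/%d\n", interval_data[i].interpreted,
                  interval_data[i].compiled, interval_data[i].native_ticks,
                  interval_data[i].compiling);
    }
  }
}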
--- a/hotspot/src/share/vm/runtime/java.cpp	Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/runtime/java.cpp	Thu Aug 31 20:26:53 2017 -0500
@@ -55,7 +55,6 @@
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/java.hpp"
@@ -465,12 +464,6 @@
     WatcherThread::stop();
   }
 
-  // Print statistics gathered (profiling ...)
-  if (Arguments::has_profile()) {
-    FlatProfiler::disengage();
-    FlatProfiler::print(10);
-  }
-
   // shut down the StatSampler task
   StatSampler::disengage();
   StatSampler::destroy();
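The before_exit() hunk above removes the profiler's shutdown path: when profiling had been requested on the command line, the VM disengaged the sampler and printed the accumulated report just before the StatSampler task was torn down. A minimal sketch of that guard-then-report step, with hypothetical names (profiling_requested, Sampler) standing in for Arguments::has_profile() and FlatProfiler:

#include <cstdio>

// Hypothetical stand-ins; not the HotSpot APIs.
static bool profiling_requested = false;

struct Sampler {
  void disengage() { /* stop the periodic sampling task */ }
  void print()     { std::printf("=== accumulated profile ===\n"); }
};
static Sampler sampler;

// Analogue of the block removed from before_exit(): if profiling was asked
// for on the command line, stop sampling and dump the report before the
// remaining services shut down.
void report_profile_at_exit() {
  if (profiling_requested) {
    sampler.disengage();
    sampler.print();
  }
}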
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/runtime/os.hpp	Thu Aug 31 20:26:53 2017 -0500
@@ -476,7 +476,6 @@
   static frame fetch_frame_from_context(const void* ucVoid);
   static frame fetch_frame_from_ucontext(Thread* thread, void* ucVoid);
 
-  static ExtendedPC get_thread_pc(Thread *thread);
 
   static void breakpoint();
   static bool start_debugging(char *buf, int buflen);
--- a/hotspot/src/share/vm/runtime/thread.cpp Thu Aug 31 17:06:10 2017 +0000 +++ b/hotspot/src/share/vm/runtime/thread.cpp Thu Aug 31 20:26:53 2017 -0500 @@ -63,7 +63,6 @@ #include "runtime/commandLineFlagWriteableList.hpp" #include "runtime/commandLineFlagRangeList.hpp" #include "runtime/deoptimization.hpp" -#include "runtime/fprofiler.hpp" #include "runtime/frame.inline.hpp" #include "runtime/globals.hpp" #include "runtime/init.hpp" @@ -748,19 +747,6 @@ } #endif // PRODUCT -// Called by flat profiler -// Callers have already called wait_for_ext_suspend_completion -// The assertion for that is currently too complex to put here: -bool JavaThread::profile_last_Java_frame(frame* _fr) { - bool gotframe = false; - // self suspension saves needed state. - if (has_last_Java_frame() && _anchor.walkable()) { - *_fr = pd_last_frame(); - gotframe = true; - } - return gotframe; -} - void Thread::interrupt(Thread* thread) { debug_only(check_for_dangling_thread_pointer(thread);) os::interrupt(thread); @@ -1381,14 +1367,6 @@ while (watcher_thread() != NULL) { // This wait should make safepoint checks, wait without a timeout, // and wait as a suspend-equivalent condition. - // - // Note: If the FlatProfiler is running, then this thread is waiting - // for the WatcherThread to terminate and the WatcherThread, via the - // FlatProfiler task, is waiting for the external suspend request on - // this thread to complete. wait_for_ext_suspend_completion() will - // eventually timeout, but that takes time. Making this wait a - // suspend-equivalent condition solves that timeout problem. - // Terminator_lock->wait(!Mutex::_no_safepoint_check_flag, 0, Mutex::_as_suspend_equivalent_flag); } @@ -1505,16 +1483,6 @@ } #endif // PRODUCT - set_thread_profiler(NULL); - if (FlatProfiler::is_active()) { - // This is where we would decide to either give each thread it's own profiler - // or use one global one from FlatProfiler, - // or up to some count of the number of profiled threads, etc. - ThreadProfiler* pp = new ThreadProfiler(); - pp->engage(); - set_thread_profiler(pp); - } - // Setup safepoint state info for this thread ThreadSafepointState::create(this); @@ -1660,7 +1628,6 @@ // All Java related clean up happens in exit ThreadSafepointState::destroy(this); - if (_thread_profiler != NULL) delete _thread_profiler; if (_thread_stat != NULL) delete _thread_stat; #if INCLUDE_JVMCI @@ -1775,13 +1742,6 @@ Handle threadObj(this, this->threadObj()); assert(threadObj.not_null(), "Java thread object should be created"); - if (get_thread_profiler() != NULL) { - get_thread_profiler()->disengage(); - ResourceMark rm; - get_thread_profiler()->print(get_thread_name()); - } - - // FIXIT: This code should be moved into else part, when reliable 1.2/1.3 check is in place { EXCEPTION_MARK; @@ -1983,12 +1943,6 @@ #endif // INCLUDE_ALL_GCS void JavaThread::cleanup_failed_attach_current_thread() { - if (get_thread_profiler() != NULL) { - get_thread_profiler()->disengage(); - ResourceMark rm; - get_thread_profiler()->print(get_thread_name()); - } - if (active_handles() != NULL) { JNIHandleBlock* block = active_handles(); set_active_handles(NULL); @@ -2786,9 +2740,6 @@ // Verify that the deferred card marks have been flushed. assert(deferred_card_mark().is_empty(), "Should be empty during GC"); - // The ThreadProfiler oops_do is done from FlatProfiler::oops_do - // since there may be more than one thread using each ThreadProfiler. 
- // Traverse the GCHandles Thread::oops_do(f, cf); @@ -3841,7 +3792,6 @@ } #endif // INCLUDE_MANAGEMENT - if (Arguments::has_profile()) FlatProfiler::engage(main_thread, true); if (MemProfiling) MemProfiler::engage(); StatSampler::engage(); if (CheckJNICalls) JniPeriodicChecker::engage(); @@ -4136,7 +4086,7 @@ // + Call before_exit(), prepare for VM exit // > run VM level shutdown hooks (they are registered through JVM_OnExit(), // currently the only user of this mechanism is File.deleteOnExit()) -// > stop flat profiler, StatSampler, watcher thread, CMS threads, +// > stop StatSampler, watcher thread, CMS threads, // post thread end and vm death events to JVMTI, // stop signal thread // + Call JavaThread::exit(), it will: @@ -4165,14 +4115,6 @@ while (Threads::number_of_non_daemon_threads() > 1) // This wait should make safepoint checks, wait without a timeout, // and wait as a suspend-equivalent condition. - // - // Note: If the FlatProfiler is running and this thread is waiting - // for another non-daemon thread to finish, then the FlatProfiler - // is waiting for the external suspend request on this thread to - // complete. wait_for_ext_suspend_completion() will eventually - // timeout, but that takes time. Making this wait a suspend- - // equivalent condition solves that timeout problem. - // Threads_lock->wait(!Mutex::_no_safepoint_check_flag, 0, Mutex::_as_suspend_equivalent_flag); }
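The thread.cpp changes above take out the per-thread lifecycle: a JavaThread that started while the FlatProfiler was active received its own engaged ThreadProfiler, and on exit (or on a failed attach) that profiler was disengaged, its report printed under a ResourceMark, and the object deleted. The sketch below isolates that attach/report/detach pattern with illustrative types; PerThreadProfiler and WorkerThread are not HotSpot classes.

#include <cstdio>
#include <string>

// Illustrative stand-ins, not the HotSpot classes.
struct PerThreadProfiler {
  bool engaged = false;
  int  ticks = 0;
  void engage()    { engaged = true;  }
  void disengage() { engaged = false; }
  void print(const std::string& thread_name) {
    std::printf("profile for %s: %d ticks\n", thread_name.c_str(), ticks);
  }
};

struct WorkerThread {
  std::string        name;
  PerThreadProfiler* profiler = nullptr;   // analogous to JavaThread::_thread_profiler

  // Analogous to the removed startup code: give the thread its own profiler
  // only while profiling is globally active.
  void on_start(bool profiling_active) {
    if (profiling_active) {
      profiler = new PerThreadProfiler();
      profiler->engage();
    }
  }

  // Analogous to the removed exit/cleanup code: stop sampling, dump this
  // thread's report, then free the profiler.
  void on_exit() {
    if (profiler != nullptr) {
      profiler->disengage();
      profiler->print(name);
      delete profiler;
      profiler = nullptr;
    }
  }
};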
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Thu Aug 31 20:26:53 2017 -0500
@@ -56,7 +56,6 @@
 #endif
 
 class ThreadSafepointState;
-class ThreadProfiler;
 
 class JvmtiThreadState;
 class JvmtiGetLoadedClassesClosure;
@@ -1720,23 +1719,6 @@
 
   void deoptimized_wrt_marked_nmethods();
 
-  // Profiling operation (see fprofile.cpp)
- public:
-  bool profile_last_Java_frame(frame* fr);
-
- private:
-  ThreadProfiler* _thread_profiler;
- private:
-  friend class FlatProfiler;            // uses both [gs]et_thread_profiler.
-  friend class FlatProfilerTask;        // uses get_thread_profiler.
-  friend class ThreadProfilerMark;      // uses get_thread_profiler.
-  ThreadProfiler* get_thread_profiler() { return _thread_profiler; }
-  ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
-    ThreadProfiler* result = _thread_profiler;
-    _thread_profiler = tp;
-    return result;
-  }
-
  public:
   // Returns the running thread as a JavaThread
   static inline JavaThread* current();
--- a/hotspot/src/share/vm/utilities/macros.hpp	Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/src/share/vm/utilities/macros.hpp	Thu Aug 31 20:26:53 2017 -0500
@@ -65,18 +65,6 @@
 #define NOT_JVMTI_RETURN_(code) { return code; }
 #endif // INCLUDE_JVMTI
 
-#ifndef INCLUDE_FPROF
-#define INCLUDE_FPROF 1
-#endif
-
-#if INCLUDE_FPROF
-#define NOT_FPROF_RETURN /* next token must be ; */
-#define NOT_FPROF_RETURN_(code) /* next token must be ; */
-#else
-#define NOT_FPROF_RETURN {}
-#define NOT_FPROF_RETURN_(code) { return code; }
-#endif // INCLUDE_FPROF
-
 #ifndef INCLUDE_VM_STRUCTS
 #define INCLUDE_VM_STRUCTS 1
 #endif
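The macros removed above are an instance of HotSpot's optional-feature idiom: an INCLUDE_<FEATURE> flag defaults to 1, and a pair of NOT_<FEATURE>_RETURN macros either expand to nothing (so a declaration stays a declaration) or expand to a stub body, letting headers such as fprofiler.hpp declare the same interface whether or not the feature is compiled in. A sketch of the idiom for a hypothetical INCLUDE_MYFEATURE flag (not a real HotSpot feature flag):

// Include-guard / stub-return idiom, shown for a made-up feature.
#ifndef INCLUDE_MYFEATURE
#define INCLUDE_MYFEATURE 1            // feature is built in by default
#endif

#if INCLUDE_MYFEATURE
#define NOT_MYFEATURE_RETURN           /* next token must be ; */
#define NOT_MYFEATURE_RETURN_(code)    /* next token must be ; */
#else
#define NOT_MYFEATURE_RETURN           {}
#define NOT_MYFEATURE_RETURN_(code)    { return code; }
#endif

class MyFeature {
 public:
  // With the feature excluded, these declarations become empty inline stubs,
  // so call sites keep compiling without #if blocks of their own.
  void engage()    NOT_MYFEATURE_RETURN;
  bool is_active() NOT_MYFEATURE_RETURN_(false);
};

#if INCLUDE_MYFEATURE
// Real definitions are compiled only when the feature is built in.
void MyFeature::engage()    { /* start the feature */ }
bool MyFeature::is_active() { return true; }
#endif

With the flag off, callers still compile against the stubbed methods and the compiler can fold the empty bodies away; the build then also excludes the feature's sources, as the JvmFeatures.gmk hunk at the top of this changeset does for fprofiler.cpp.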
--- a/hotspot/test/gc/g1/TestGCLogMessages.java	Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/test/gc/g1/TestGCLogMessages.java	Thu Aug 31 20:26:53 2017 -0500
@@ -110,7 +110,6 @@
         new LogMessageWithLevel("Universe Roots", Level.TRACE),
         new LogMessageWithLevel("JNI Handles Roots", Level.TRACE),
         new LogMessageWithLevel("ObjectSynchronizer Roots", Level.TRACE),
-        new LogMessageWithLevel("FlatProfiler Roots", Level.TRACE),
         new LogMessageWithLevel("Management Roots", Level.TRACE),
         new LogMessageWithLevel("SystemDictionary Roots", Level.TRACE),
         new LogMessageWithLevel("CLDG Roots", Level.TRACE),
--- a/hotspot/test/runtime/CommandLine/TestNullTerminatedFlags.java	Thu Aug 31 17:06:10 2017 +0000
+++ b/hotspot/test/runtime/CommandLine/TestNullTerminatedFlags.java	Thu Aug 31 20:26:53 2017 -0500
@@ -42,7 +42,6 @@
             "-green",
             "-native",
             "-Xrs",
-            "-Xprof",
             "-Xconcurrentio",
             "-Xinternalversion",
             "-Xprintflags",
--- a/hotspot/test/runtime/MinimalVM/Xprof.java	Thu Aug 31 17:06:10 2017 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @requires vm.flavor == "minimal"
- * @modules java.base/jdk.internal.misc
- * @library /test/lib
- * @run driver Xprof
- */
-
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.process.ProcessTools;
-
-public class Xprof {
-
-    public static void main(String args[]) throws Exception {
-        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-minimal", "-Xprof", "-version");
-        new OutputAnalyzer(pb.start())
-                .shouldContain("Flat profiling is not supported in this VM.")
-                .shouldHaveExitValue(1);
-
-    }
-}