8166377: is_compiled_by_jvmci hot in some profiles - improve nmethod compiler type detection
Summary: Refactor code to remove the virtual call
Reviewed-by: kvn, twisti
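
The change replaces the virtual-call pattern nmethod::is_compiled_by_*() -> compiler()->is_*() with a CompilerType value recorded in CodeBlob at construction time, so the queries become inline comparisons against a const field. The following is a minimal standalone sketch of that before/after pattern; AbstractCompilerOld, NmethodOld and CodeBlobNew are hypothetical stand-ins for illustration only, not the HotSpot declarations changed in the diff below.

#include <cstddef>

// Sketch of the pattern applied by this changeset, using simplified stand-in types.

enum CompilerType { compiler_none, compiler_c1, compiler_c2, compiler_jvmci, compiler_shark };

// Before: every is_compiled_by_* query went through a virtual call on the compiler object.
struct AbstractCompilerOld {
  virtual ~AbstractCompilerOld() {}
  virtual bool is_jvmci() const { return false; }
};

struct NmethodOld {
  AbstractCompilerOld* _compiler;     // may be NULL, e.g. for native wrappers
  bool is_compiled_by_jvmci() const {
    return _compiler != NULL && _compiler->is_jvmci();   // NULL check + virtual dispatch
  }
};

// After: the CompilerType is captured once at construction, so the query is an
// inline compare against a const field and needs no indirection.
class CodeBlobNew {
  const CompilerType _type;
 public:
  explicit CodeBlobNew(CompilerType type) : _type(type) {}
  bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; }
};

int main() {
  CodeBlobNew nm(compiler_jvmci);
  return nm.is_compiled_by_jvmci() ? 0 : 1;   // exits 0: the field compare answers the query
}
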
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -42,7 +42,7 @@
#include "runtime/sharedRuntime.hpp"
-Compiler::Compiler() : AbstractCompiler(c1) {
+Compiler::Compiler() : AbstractCompiler(compiler_c1) {
}
void Compiler::init_c1_runtime() {
--- a/hotspot/src/share/vm/code/codeBlob.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -45,6 +45,10 @@
#include "c1/c1_Runtime1.hpp"
#endif
+const char* CodeBlob::compiler_name() const {
+ return compilertype2name(_type);
+}
+
unsigned int CodeBlob::align_code_offset(int offset) {
// align the size to CodeEntryAlignment
return
@@ -65,7 +69,7 @@
return size;
}
-CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
+CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
@@ -80,7 +84,8 @@
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
- _content_begin(layout.content_begin())
+ _content_begin(layout.content_begin()),
+ _type(type)
{
assert(layout.size() == round_to(layout.size(), oopSize), "unaligned size");
assert(layout.header_size() == round_to(layout.header_size(), oopSize), "unaligned size");
@@ -92,7 +97,7 @@
#endif // COMPILER1
}
-CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
+CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
@@ -106,7 +111,8 @@
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
- _content_begin(layout.content_begin())
+ _content_begin(layout.content_begin()),
+ _type(type)
{
assert(_size == round_to(_size, oopSize), "unaligned size");
assert(_header_size == round_to(_header_size, oopSize), "unaligned size");
@@ -123,7 +129,7 @@
// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
- : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
+ : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
assert(!UseRelocIndex, "no space allocated for reloc index yet");
@@ -148,7 +154,7 @@
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments
-) : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
+) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
cb->copy_code_and_locs_to(this);
}
--- a/hotspot/src/share/vm/code/codeBlob.hpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.hpp Fri Oct 21 20:12:47 2016 +0200
@@ -26,6 +26,7 @@
#define SHARE_VM_CODE_CODEBLOB_HPP
#include "asm/codeBuffer.hpp"
+#include "compiler/compilerDefinitions.hpp"
#include "compiler/oopMap.hpp"
#include "runtime/frame.hpp"
#include "runtime/handles.hpp"
@@ -71,7 +72,8 @@
friend class CodeCacheDumper;
protected:
- const char* _name;
+
+ const CompilerType _type; // CompilerType
int _size; // total size of CodeBlob in bytes
int _header_size; // size of header (depends on subclass)
int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have
@@ -92,9 +94,10 @@
ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
bool _caller_must_gc_arguments;
CodeStrings _strings;
+ const char* _name;
- CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
- CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
+ CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
+ CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
public:
// Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size);
@@ -115,9 +118,11 @@
virtual bool is_method_handles_adapter_blob() const { return false; }
virtual bool is_compiled() const { return false; }
- virtual bool is_compiled_by_c2() const { return false; }
- virtual bool is_compiled_by_c1() const { return false; }
- virtual bool is_compiled_by_jvmci() const { return false; }
+ inline bool is_compiled_by_c1() const { return _type == compiler_c1; };
+ inline bool is_compiled_by_c2() const { return _type == compiler_c2; };
+ inline bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; };
+ inline bool is_compiled_by_shark() const { return _type == compiler_shark; };
+ const char* compiler_name() const;
// Casting
nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }
--- a/hotspot/src/share/vm/code/compiledMethod.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/code/compiledMethod.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -31,14 +31,14 @@
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
-CompiledMethod::CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
- : CodeBlob(name, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
+CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
+ : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
_method(method), _mark_for_deoptimization_status(not_marked) {
init_defaults();
}
-CompiledMethod::CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
- : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
+CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
+ : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
_method(method), _mark_for_deoptimization_status(not_marked) {
init_defaults();
}
--- a/hotspot/src/share/vm/code/compiledMethod.hpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/code/compiledMethod.hpp Fri Oct 21 20:12:47 2016 +0200
@@ -164,8 +164,8 @@
virtual void flush() = 0;
protected:
- CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
- CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
+ CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
+ CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
public:
virtual bool is_compiled() const { return true; }
@@ -191,12 +191,10 @@
// will be transformed to zombie immediately
};
- virtual AbstractCompiler* compiler() const = 0;
virtual bool is_in_use() const = 0;
virtual int comp_level() const = 0;
virtual int compile_id() const = 0;
-
virtual address verified_entry_point() const = 0;
virtual void log_identity(xmlStream* log) const = 0;
virtual void log_state_change() const = 0;
--- a/hotspot/src/share/vm/code/nmethod.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -82,32 +82,6 @@
#endif
-bool nmethod::is_compiled_by_c1() const {
- if (compiler() == NULL) {
- return false;
- }
- return compiler()->is_c1();
-}
-bool nmethod::is_compiled_by_jvmci() const {
- if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
- if (is_native_method()) return false;
- return compiler()->is_jvmci();
-}
-bool nmethod::is_compiled_by_c2() const {
- if (compiler() == NULL) {
- return false;
- }
- return compiler()->is_c2();
-}
-bool nmethod::is_compiled_by_shark() const {
- if (compiler() == NULL) {
- return false;
- }
- return compiler()->is_shark();
-}
-
-
-
//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
@@ -440,7 +414,6 @@
_scavenge_root_link = NULL;
}
_scavenge_root_state = 0;
- _compiler = NULL;
#if INCLUDE_RTM_OPT
_rtm_state = NoRTM;
#endif
@@ -468,7 +441,7 @@
CodeOffsets offsets;
offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
- nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
+ nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), compiler_none, native_nmethod_size,
compile_id, &offsets,
code_buffer, frame_size,
basic_lock_owner_sp_offset,
@@ -518,7 +491,7 @@
+ round_to(debug_info->data_size() , oopSize);
nm = new (nmethod_size, comp_level)
- nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
+ nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
oop_maps,
handler_table,
@@ -569,6 +542,7 @@
// For native wrappers
nmethod::nmethod(
Method* method,
+ CompilerType type,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
@@ -577,7 +551,7 @@
ByteSize basic_lock_owner_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps )
- : CompiledMethod(method, "native nmethod", nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
+ : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
_native_basic_lock_sp_offset(basic_lock_sp_offset)
{
@@ -666,6 +640,7 @@
nmethod::nmethod(
Method* method,
+ CompilerType type,
int nmethod_size,
int compile_id,
int entry_bci,
@@ -685,7 +660,7 @@
Handle speculation_log
#endif
)
- : CompiledMethod(method, "nmethod", nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
+ : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
_native_receiver_sp_offset(in_ByteSize(-1)),
_native_basic_lock_sp_offset(in_ByteSize(-1))
{
@@ -701,7 +676,6 @@
_entry_bci = entry_bci;
_compile_id = compile_id;
_comp_level = comp_level;
- _compiler = compiler;
_orig_pc_offset = orig_pc_offset;
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
@@ -803,9 +777,7 @@
log->print(" compile_id='%d'", compile_id());
const char* nm_kind = compile_kind();
if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind);
- if (compiler() != NULL) {
- log->print(" compiler='%s'", compiler()->name());
- }
+ log->print(" compiler='%s'", compiler_name());
if (TieredCompilation) {
log->print(" level='%d'", comp_level());
}
--- a/hotspot/src/share/vm/code/nmethod.hpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp Fri Oct 21 20:12:47 2016 +0200
@@ -74,8 +74,6 @@
static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;
- AbstractCompiler* _compiler; // The compiler which compiled this nmethod
-
// offsets for entry points
address _entry_point; // entry point with class check
address _verified_entry_point; // entry point without class check
@@ -166,6 +164,7 @@
// For native wrappers
nmethod(Method* method,
+ CompilerType type,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
@@ -177,6 +176,7 @@
// Creation support
nmethod(Method* method,
+ CompilerType type,
int nmethod_size,
int compile_id,
int entry_bci,
@@ -251,18 +251,10 @@
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps);
- // accessors
- AbstractCompiler* compiler() const { return _compiler; }
-
// type info
bool is_nmethod() const { return true; }
bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
- bool is_compiled_by_c1() const;
- bool is_compiled_by_jvmci() const;
- bool is_compiled_by_c2() const;
- bool is_compiled_by_shark() const;
-
// boundaries for different parts
address consts_begin () const { return header_begin() + _consts_offset ; }
address consts_end () const { return code_begin() ; }
--- a/hotspot/src/share/vm/compiler/abstractCompiler.hpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/compiler/abstractCompiler.hpp Fri Oct 21 20:12:47 2016 +0200
@@ -26,6 +26,7 @@
#define SHARE_VM_COMPILER_ABSTRACTCOMPILER_HPP
#include "ci/compilerInterface.hpp"
+#include "compiler/compilerDefinitions.hpp"
#include "compiler/compilerDirectives.hpp"
typedef void (*initializer)(void);
@@ -82,24 +83,15 @@
// This thread will initialize the compiler runtime.
bool should_perform_init();
- // The (closed set) of concrete compiler classes.
- enum Type {
- none,
- c1,
- c2,
- jvmci,
- shark
- };
-
private:
- Type _type;
+ const CompilerType _type;
#if INCLUDE_JVMCI
CompilerStatistics _stats;
#endif
public:
- AbstractCompiler(Type type) : _type(type), _compiler_state(uninitialized), _num_compiler_threads(0) {}
+ AbstractCompiler(CompilerType type) : _type(type), _compiler_state(uninitialized), _num_compiler_threads(0) {}
// This function determines the compiler thread that will perform the
// shutdown of the corresponding compiler runtime.
@@ -157,10 +149,11 @@
}
// Compiler type queries.
- bool is_c1() { return _type == c1; }
- bool is_c2() { return _type == c2; }
- bool is_jvmci() { return _type == jvmci; }
- bool is_shark() { return _type == shark; }
+ const bool is_c1() { return _type == compiler_c1; }
+ const bool is_c2() { return _type == compiler_c2; }
+ const bool is_jvmci() { return _type == compiler_jvmci; }
+ const bool is_shark() { return _type == compiler_shark; }
+ const CompilerType type() { return _type; }
// Extra tests to identify trivial methods for the tiered compilation policy.
virtual bool is_trivial(Method* method) { return false; }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/compiler/compilerDefinitions.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "compiler/compilerDefinitions.hpp"
+
+const char* compilertype2name_tab[compiler_number_of_types] = {
+ "",
+ "c1",
+ "c2",
+ "jvmci",
+ "shark"
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/compiler/compilerDefinitions.hpp Fri Oct 21 20:12:47 2016 +0200
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
+#define SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+// The (closed set) of concrete compiler classes.
+enum CompilerType {
+ compiler_none,
+ compiler_c1,
+ compiler_c2,
+ compiler_jvmci,
+ compiler_shark,
+ compiler_number_of_types
+};
+
+extern const char* compilertype2name_tab[compiler_number_of_types]; // Map CompilerType to its name
+inline const char* compilertype2name(CompilerType t) { return (uint)t < compiler_number_of_types ? compilertype2name_tab[t] : NULL; }
+
+// Handy constants for deciding which compiler mode to use.
+enum MethodCompilation {
+ InvocationEntryBci = -1 // i.e., not a on-stack replacement compilation
+};
+
+// Enumeration to distinguish tiers of compilation
+enum CompLevel {
+ CompLevel_any = -1,
+ CompLevel_all = -1,
+ CompLevel_none = 0, // Interpreter
+ CompLevel_simple = 1, // C1
+ CompLevel_limited_profile = 2, // C1, invocation & backedge counters
+ CompLevel_full_profile = 3, // C1, invocation & backedge counters + mdo
+ CompLevel_full_optimization = 4, // C2, Shark or JVMCI
+
+#if defined(COMPILER2) || defined(SHARK)
+ CompLevel_highest_tier = CompLevel_full_optimization, // pure C2 and tiered or JVMCI and tiered
+#elif defined(COMPILER1)
+ CompLevel_highest_tier = CompLevel_simple, // pure C1 or JVMCI
+#else
+ CompLevel_highest_tier = CompLevel_none,
+#endif
+
+#if defined(TIERED)
+ CompLevel_initial_compile = CompLevel_full_profile // tiered
+#elif defined(COMPILER1) || INCLUDE_JVMCI
+ CompLevel_initial_compile = CompLevel_simple // pure C1 or JVMCI
+#elif defined(COMPILER2) || defined(SHARK)
+ CompLevel_initial_compile = CompLevel_full_optimization // pure C2
+#else
+ CompLevel_initial_compile = CompLevel_none
+#endif
+};
+
+inline bool is_c1_compile(int comp_level) {
+ return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
+}
+
+inline bool is_c2_compile(int comp_level) {
+ return comp_level == CompLevel_full_optimization;
+}
+
+inline bool is_highest_tier_compile(int comp_level) {
+ return comp_level == CompLevel_highest_tier;
+}
+
+inline bool is_compile(int comp_level) {
+ return is_c1_compile(comp_level) || is_c2_compile(comp_level);
+}
+
+// States of Restricted Transactional Memory usage.
+enum RTMState {
+ NoRTM = 0x2, // Don't use RTM
+ UseRTM = 0x1, // Use RTM
+ ProfileRTM = 0x0 // Use RTM with abort ratio calculation
+};
+
+#ifndef INCLUDE_RTM_OPT
+#define INCLUDE_RTM_OPT 0
+#endif
+#if INCLUDE_RTM_OPT
+#define RTM_OPT_ONLY(code) code
+#else
+#define RTM_OPT_ONLY(code)
+#endif
+
+#endif // SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
--- a/hotspot/src/share/vm/jvmci/jvmciCompiler.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/jvmci/jvmciCompiler.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -37,7 +37,7 @@
JVMCICompiler* JVMCICompiler::_instance = NULL;
elapsedTimer JVMCICompiler::_codeInstallTimer;
-JVMCICompiler::JVMCICompiler() : AbstractCompiler(jvmci) {
+JVMCICompiler::JVMCICompiler() : AbstractCompiler(compiler_jvmci) {
_bootstrapping = false;
_bootstrap_compilation_request_handled = false;
_methods_compiled = 0;
--- a/hotspot/src/share/vm/oops/method.hpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp Fri Oct 21 20:12:47 2016 +0200
@@ -27,6 +27,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
+#include "compiler/compilerDefinitions.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
--- a/hotspot/src/share/vm/opto/c2compiler.hpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/opto/c2compiler.hpp Fri Oct 21 20:12:47 2016 +0200
@@ -32,7 +32,7 @@
static bool init_c2_runtime();
public:
- C2Compiler() : AbstractCompiler(c2) {}
+ C2Compiler() : AbstractCompiler(compiler_c2) {}
// Name
const char *name() { return "C2"; }
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -171,7 +171,6 @@
assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
thread->set_deopt_compiled_method(cm);
- bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
if (VerifyStack) {
thread->validate_frame_layout();
@@ -241,6 +240,7 @@
JRT_BLOCK
realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
JRT_END
+ bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
#ifndef PRODUCT
if (TraceDeoptimization) {
@@ -1651,7 +1651,7 @@
if (TraceDeoptimization) { // make noise on the tty
tty->print("Uncommon trap occurred in");
nm->method()->print_short_name(tty);
- tty->print(" compiler=%s compile_id=%d", nm->compiler() == NULL ? "" : nm->compiler()->name(), nm->compile_id());
+ tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
#if INCLUDE_JVMCI
if (nm->is_nmethod()) {
oop installedCode = nm->as_nmethod()->jvmci_installed_code();
--- a/hotspot/src/share/vm/runtime/frame.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/runtime/frame.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -686,9 +686,7 @@
if (cm->is_nmethod()) {
nmethod* nm = cm->as_nmethod();
st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : ""));
- if (nm->compiler() != NULL) {
- st->print(" %s", nm->compiler()->name());
- }
+ st->print(" %s", nm->compiler_name());
}
m->name_and_sig_as_C_string(buf, buflen);
st->print(" %s", buf);
--- a/hotspot/src/share/vm/runtime/rtmLocking.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/runtime/rtmLocking.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "utilities/globalDefinitions.hpp"
+#include "compiler/compilerDefinitions.hpp"
#if INCLUDE_RTM_OPT
--- a/hotspot/src/share/vm/utilities/globalDefinitions.cpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.cpp Fri Oct 21 20:12:47 2016 +0200
@@ -214,7 +214,6 @@
return T_ILLEGAL;
}
-
// Map BasicType to size in words
int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, -1};
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Fri Oct 21 10:16:09 2016 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Fri Oct 21 20:12:47 2016 +0200
@@ -444,13 +444,6 @@
// Machine dependent stuff
-// States of Restricted Transactional Memory usage.
-enum RTMState {
- NoRTM = 0x2, // Don't use RTM
- UseRTM = 0x1, // Use RTM
- ProfileRTM = 0x0 // Use RTM with abort ratio calculation
-};
-
// The maximum size of the code cache. Can be overridden by targets.
#define CODE_CACHE_SIZE_LIMIT (2*G)
// Allow targets to reduce the default size of the code cache.
@@ -458,15 +451,6 @@
#include CPU_HEADER(globalDefinitions)
-#ifndef INCLUDE_RTM_OPT
-#define INCLUDE_RTM_OPT 0
-#endif
-#if INCLUDE_RTM_OPT
-#define RTM_OPT_ONLY(code) code
-#else
-#define RTM_OPT_ONLY(code)
-#endif
-
// To assure the IRIW property on processors that are not multiple copy
// atomic, sync instructions must be issued between volatile reads to
// assure their ordering, instead of after volatile stores.
@@ -923,55 +907,6 @@
};
-// Handy constants for deciding which compiler mode to use.
-enum MethodCompilation {
- InvocationEntryBci = -1 // i.e., not a on-stack replacement compilation
-};
-
-// Enumeration to distinguish tiers of compilation
-enum CompLevel {
- CompLevel_any = -1,
- CompLevel_all = -1,
- CompLevel_none = 0, // Interpreter
- CompLevel_simple = 1, // C1
- CompLevel_limited_profile = 2, // C1, invocation & backedge counters
- CompLevel_full_profile = 3, // C1, invocation & backedge counters + mdo
- CompLevel_full_optimization = 4, // C2, Shark or JVMCI
-
-#if defined(COMPILER2) || defined(SHARK)
- CompLevel_highest_tier = CompLevel_full_optimization, // pure C2 and tiered or JVMCI and tiered
-#elif defined(COMPILER1)
- CompLevel_highest_tier = CompLevel_simple, // pure C1 or JVMCI
-#else
- CompLevel_highest_tier = CompLevel_none,
-#endif
-
-#if defined(TIERED)
- CompLevel_initial_compile = CompLevel_full_profile // tiered
-#elif defined(COMPILER1) || INCLUDE_JVMCI
- CompLevel_initial_compile = CompLevel_simple // pure C1 or JVMCI
-#elif defined(COMPILER2) || defined(SHARK)
- CompLevel_initial_compile = CompLevel_full_optimization // pure C2
-#else
- CompLevel_initial_compile = CompLevel_none
-#endif
-};
-
-inline bool is_c1_compile(int comp_level) {
- return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
-}
-
-inline bool is_c2_compile(int comp_level) {
- return comp_level == CompLevel_full_optimization;
-}
-
-inline bool is_highest_tier_compile(int comp_level) {
- return comp_level == CompLevel_highest_tier;
-}
-
-inline bool is_compile(int comp_level) {
- return is_c1_compile(comp_level) || is_c2_compile(comp_level);
-}
//----------------------------------------------------------------------------------------------------
// 'Forward' declarations of frequently used classes