--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -52,6 +52,11 @@
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int sparc_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), sparc_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@@ -125,6 +130,11 @@
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
const int sparc_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), sparc_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -58,6 +58,11 @@
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int i486_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), i486_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@@ -132,6 +137,11 @@
// add code here, bump the code stub size returned by pd_code_size_limit!
const int i486_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), i486_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -49,6 +49,11 @@
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int amd64_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), amd64_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@@ -126,6 +131,11 @@
// returned by pd_code_size_limit!
const int amd64_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), amd64_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -4219,7 +4219,9 @@
}
}
- if (!PrintInlining) return;
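+  // Also print when inlining output is requested for this method via a CompileCommand option.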
+ if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
+ return;
+ }
CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) {
callee->print_codes();
--- a/hotspot/src/share/vm/code/compiledIC.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/code/compiledIC.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -160,7 +160,7 @@
// High-level access to an inline cache. Guaranteed to be MT-safe.
-void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
@@ -170,8 +170,10 @@
assert(bytecode == Bytecodes::_invokeinterface, "");
int itable_index = call_info->itable_index();
entry = VtableStubs::find_itable_stub(itable_index);
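+    // May be NULL if the itable stub could not be created (code cache is full).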
+    if (entry == NULL) {
+      return false;
+    }
#ifdef ASSERT
- assert(entry != NULL, "entry not computed");
int index = call_info->resolved_method()->itable_index();
assert(index == itable_index, "CallInfo pre-computes this");
#endif //ASSERT
@@ -184,6 +186,9 @@
int vtable_index = call_info->vtable_index();
assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
entry = VtableStubs::find_vtable_stub(vtable_index);
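+    // May be NULL if the vtable stub could not be created (code cache is full).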
+ if (entry == NULL) {
+ return false;
+ }
InlineCacheBuffer::create_transition_stub(this, NULL, entry);
}
@@ -200,6 +205,7 @@
// race because the IC entry was complete when we safepointed so
// cleaning it immediately is harmless.
// assert(is_megamorphic(), "sanity check");
+ return true;
}
--- a/hotspot/src/share/vm/code/compiledIC.hpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/code/compiledIC.hpp Thu Sep 26 08:48:15 2013 +0200
@@ -226,7 +226,10 @@
//
void set_to_clean(); // Can only be called during a safepoint operation
void set_to_monomorphic(CompiledICInfo& info);
- void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+
+ // Returns true if successful and false otherwise. The call can fail if memory
+ // allocation in the code cache fails.
+ bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
--- a/hotspot/src/share/vm/code/vtableStubs.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -46,12 +46,9 @@
address VtableStub::_chunk_end = NULL;
VMReg VtableStub::_receiver_location = VMRegImpl::Bad();
-static int num_vtable_chunks = 0;
-
void* VtableStub::operator new(size_t size, int code_size) throw() {
assert(size == sizeof(VtableStub), "mismatched size");
- num_vtable_chunks++;
// compute real VtableStub size (rounded to nearest word)
const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
// malloc them in chunks to minimize header overhead
@@ -60,7 +57,7 @@
const int bytes = chunk_factor * real_size + pd_code_alignment();
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
- vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
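+      // Report the allocation failure to the caller instead of exiting the VM.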
+ return NULL;
}
_chunk = blob->content_begin();
_chunk_end = _chunk + bytes;
@@ -121,6 +118,12 @@
} else {
s = create_itable_stub(vtable_index);
}
+
+  // Creation of the vtable or itable stub can fail if there is not enough free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
enter(is_vtable_stub, vtable_index, s);
if (PrintAdapterHandlers) {
tty->print_cr("Decoding VtableStub %s[%d]@%d",
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -123,7 +123,7 @@
// Allows targeted inlining
if(callee_method->should_inline()) {
*wci_result = *(WarmCallInfo::always_hot());
- if (PrintInlining && Verbose) {
+ if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method is hot: ");
}
@@ -137,7 +137,7 @@
if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
size < InlineThrowMaxSize ) {
wci_result->set_profit(wci_result->profit() * 100);
- if (PrintInlining && Verbose) {
+ if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
}
@@ -491,7 +491,7 @@
C->log()->inline_fail(inline_msg);
}
}
- if (PrintInlining) {
+ if (C->print_inlining()) {
C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
if (Verbose && callee_method) {
@@ -540,7 +540,7 @@
#ifndef PRODUCT
if (UseOldInlining && InlineWarmCalls
- && (PrintOpto || PrintOptoInlining || PrintInlining)) {
+ && (PrintOpto || C->print_inlining())) {
bool cold = wci.is_cold();
bool hot = !cold && wci.is_hot();
bool old_cold = !success;
@@ -617,7 +617,7 @@
callee_method->is_compiled_lambda_form()) {
max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem
}
- if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+ if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr(" \\-> discounting inline depth");
}
--- a/hotspot/src/share/vm/opto/callGenerator.hpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/opto/callGenerator.hpp Thu Sep 26 08:48:15 2013 +0200
@@ -159,8 +159,9 @@
virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
- if (PrintInlining)
+ if (C->print_inlining()) {
C->print_inlining(callee, inline_level, bci, msg);
+ }
}
};
--- a/hotspot/src/share/vm/opto/compile.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/opto/compile.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -654,7 +654,7 @@
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
- _print_inlining(0) {
+ _print_inlining_idx(0) {
C = this;
CompileWrapper cw(this);
@@ -679,6 +679,8 @@
set_print_assembly(print_opto_assembly);
set_parsed_irreducible_loop(false);
#endif
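+  // Per-method CompileCommand options can enable inlining/intrinsic printing even when the global flags are off.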
+ set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
+ set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
if (ProfileTraps) {
// Make sure the method being compiled gets its own MDO,
@@ -710,7 +712,7 @@
PhaseGVN gvn(node_arena(), estimated_size);
set_initial_gvn(&gvn);
- if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+ if (print_inlining() || print_intrinsics()) {
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
}
{ // Scope for timing the parser
@@ -937,7 +939,7 @@
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
- _print_inlining(0) {
+ _print_inlining_idx(0) {
C = this;
#ifndef PRODUCT
@@ -3611,7 +3613,7 @@
}
void Compile::dump_inlining() {
- if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+ if (print_inlining() || print_intrinsics()) {
// Print inlining message for candidates that we couldn't inline
// for lack of space or non constant receiver
for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3635,7 +3637,7 @@
}
}
for (int i = 0; i < _print_inlining_list->length(); i++) {
- tty->print(_print_inlining_list->at(i).ss()->as_string());
+ tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
}
}
}
--- a/hotspot/src/share/vm/opto/compile.hpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/opto/compile.hpp Thu Sep 26 08:48:15 2013 +0200
@@ -312,6 +312,8 @@
bool _do_method_data_update; // True if we generate code to update MethodData*s
int _AliasLevel; // Locally-adjusted version of AliasLevel flag.
bool _print_assembly; // True if we should dump assembly code for this compilation
+ bool _print_inlining; // True if we should print inlining for this compilation
+ bool _print_intrinsics; // True if we should print intrinsics for this compilation
#ifndef PRODUCT
bool _trace_opto_output;
bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -414,7 +416,7 @@
};
GrowableArray<PrintInliningBuffer>* _print_inlining_list;
- int _print_inlining;
+ int _print_inlining_idx;
// Only keep nodes in the expensive node list that need to be optimized
void cleanup_expensive_nodes(PhaseIterGVN &igvn);
@@ -426,24 +428,24 @@
public:
outputStream* print_inlining_stream() const {
- return _print_inlining_list->at(_print_inlining).ss();
+ return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
}
void print_inlining_skip(CallGenerator* cg) {
- if (PrintInlining) {
- _print_inlining_list->at(_print_inlining).set_cg(cg);
- _print_inlining++;
- _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+ if (_print_inlining) {
+ _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
+ _print_inlining_idx++;
+ _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
}
}
void print_inlining_insert(CallGenerator* cg) {
- if (PrintInlining) {
+ if (_print_inlining) {
for (int i = 0; i < _print_inlining_list->length(); i++) {
- if (_print_inlining_list->at(i).cg() == cg) {
+ if (_print_inlining_list->adr_at(i)->cg() == cg) {
_print_inlining_list->insert_before(i+1, PrintInliningBuffer());
- _print_inlining = i+1;
- _print_inlining_list->at(i).set_cg(NULL);
+ _print_inlining_idx = i+1;
+ _print_inlining_list->adr_at(i)->set_cg(NULL);
return;
}
}
@@ -572,6 +574,10 @@
int AliasLevel() const { return _AliasLevel; }
bool print_assembly() const { return _print_assembly; }
void set_print_assembly(bool z) { _print_assembly = z; }
+ bool print_inlining() const { return _print_inlining; }
+ void set_print_inlining(bool z) { _print_inlining = z; }
+ bool print_intrinsics() const { return _print_intrinsics; }
+ void set_print_intrinsics(bool z) { _print_intrinsics = z; }
// check the CompilerOracle for special behaviours for this compile
bool method_has_option(const char * option) {
return method() != NULL && method()->has_option(option);
--- a/hotspot/src/share/vm/opto/doCall.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/opto/doCall.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -41,9 +41,9 @@
#include "runtime/sharedRuntime.hpp"
void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
- if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+ if (TraceTypeProfile || C->print_inlining()) {
outputStream* out = tty;
- if (!PrintInlining) {
+ if (!C->print_inlining()) {
if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
method->print_short_name();
tty->cr();
--- a/hotspot/src/share/vm/opto/library_call.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/opto/library_call.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -543,7 +543,7 @@
Compile* C = kit.C;
int nodes = C->unique();
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
tty->print_cr("Intrinsic %s", str);
@@ -554,7 +554,7 @@
// Try to inline the intrinsic.
if (kit.try_to_inline()) {
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -570,7 +570,7 @@
}
// The intrinsic bailed out
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
if (jvms->has_method()) {
// Not a root compile.
const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
@@ -592,7 +592,7 @@
int nodes = C->unique();
#ifndef PRODUCT
assert(is_predicted(), "sanity");
- if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
tty->print_cr("Predicate for intrinsic %s", str);
@@ -603,7 +603,7 @@
Node* slow_ctl = kit.try_to_predicate();
if (!kit.failing()) {
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -617,7 +617,7 @@
}
// The intrinsic bailed out
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
if (jvms->has_method()) {
// Not a root compile.
const char* msg = "failed to generate predicate for intrinsic";
@@ -2299,7 +2299,7 @@
const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
#ifndef PRODUCT
- if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+ if (C->print_intrinsics() || C->print_inlining()) {
tty->print(" from base type: "); adr_type->dump();
tty->print(" sharpened value: "); tjp->dump();
}
@@ -3260,7 +3260,7 @@
if (mirror_con == NULL) return false; // cannot happen?
#ifndef PRODUCT
- if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+ if (C->print_intrinsics() || C->print_inlining()) {
ciType* k = mirror_con->java_mirror_type();
if (k) {
tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
@@ -3952,14 +3952,14 @@
// caller sensitive methods.
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
}
#endif
if (!jvms()->has_method()) {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out because intrinsic was inlined at top level");
}
#endif
@@ -3983,7 +3983,7 @@
// Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
if (!m->caller_sensitive()) {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
}
#endif
@@ -3999,7 +3999,7 @@
set_result(makecon(TypeInstPtr::make(caller_mirror)));
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
tty->print_cr(" JVM state at this point:");
for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -4015,7 +4015,7 @@
}
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
tty->print_cr(" JVM state at this point:");
for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Wed Sep 25 13:03:21 2013 -0400
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Sep 26 08:48:15 2013 +0200
@@ -1506,8 +1506,11 @@
info, CHECK_(methodHandle()));
inline_cache->set_to_monomorphic(info);
} else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
- // Change to megamorphic
- inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+ // Potential change to megamorphic
+ bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
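+        // The transition fails if the required vtable/itable stub cannot be allocated in the
+        // code cache; clean the inline cache so the call site is resolved again later.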
+ if (!successful) {
+ inline_cache->set_to_clean();
+ }
} else {
// Either clean or megamorphic
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/print/PrintInlining.java Thu Sep 26 08:48:15 2013 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8022585
+ * @summary VM crashes when run with -XX:+PrintInlining
+ * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
+ *
+ */
+
+public class PrintInlining {
+ public static void main(String[] args) {
+ System.out.println("Passed");
+ }
+}