--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -1002,18 +1002,6 @@
// and the vm will find there should this case occur.
Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
__ st_ptr(G5_method, callee_target_addr);
-
- if (StressNonEntrant) {
- // Open a big window for deopt failure
- __ save_frame(0);
- __ mov(G0, L0);
- Label loop;
- __ bind(loop);
- __ sub(L0, 1, L0);
- __ br_null_short(L0, Assembler::pt, loop);
- __ restore();
- }
-
__ jmpl(G3, 0, G0);
__ delayed()->nop();
}
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -4338,6 +4338,11 @@
#endif // PRODUCT
void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
+ // A default method's holder is an interface
+ if (known_holder != NULL && known_holder->is_interface()) {
+ assert(known_holder->is_instance_klass() && ((ciInstanceKlass*)known_holder)->has_default_methods(), "should be default method");
+ known_holder = NULL;
+ }
append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
}
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -2574,8 +2574,25 @@
__ jump(x->default_sux());
}
-
-ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) {
+/**
+ * Emit profiling code if needed for arguments, parameters, return value types
+ *
+ * @param md MDO the code will update at runtime
+ * @param md_base_offset common offset in the MDO for this profile and subsequent ones
+ * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
+ * @param profiled_k current profile
+ * @param obj IR node for the object to be profiled
+ * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
+ * Set once we find an update to make and use for next ones.
+ * @param not_null true if we know obj cannot be null
+ * @param signature_at_call_k signature at call for obj
+ * @param callee_signature_k signature of callee for obj
+ * (the call site and callee signatures differ at a method handle call)
+ * @return the only klass we know will ever be seen at this profile point
+ */
+ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
+ Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
+ ciKlass* callee_signature_k) {
ciKlass* result = NULL;
bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
bool do_update = !TypeEntries::is_type_unknown(profiled_k);
@@ -2590,9 +2607,9 @@
if (do_update) {
// try to find exact type, using CHA if possible, so that loading
// the klass from the object can be avoided
- ciType* type = arg->exact_type();
+ ciType* type = obj->exact_type();
if (type == NULL) {
- type = arg->declared_type();
+ type = obj->declared_type();
type = comp->cha_exact_type(type);
}
assert(type == NULL || type->is_klass(), "type should be class");
@@ -2608,23 +2625,33 @@
ciKlass* exact_signature_k = NULL;
if (do_update) {
// Is the type from the signature exact (the only one possible)?
- exact_signature_k = signature_k->exact_klass();
+ exact_signature_k = signature_at_call_k->exact_klass();
if (exact_signature_k == NULL) {
- exact_signature_k = comp->cha_exact_type(signature_k);
+ exact_signature_k = comp->cha_exact_type(signature_at_call_k);
} else {
result = exact_signature_k;
- do_update = false;
// Known statically. No need to emit any code: prevent
// LIR_Assembler::emit_profile_type() from emitting useless code
profiled_k = ciTypeEntries::with_status(result, profiled_k);
}
if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
- assert(exact_klass == NULL, "arg and signature disagree?");
+ assert(exact_klass == NULL, "obj and signature disagree?");
// sometimes the type of the signature is better than the best type
// the compiler has
exact_klass = exact_signature_k;
- do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
}
+ if (callee_signature_k != NULL &&
+ callee_signature_k != signature_at_call_k) {
+ ciKlass* improved_klass = callee_signature_k->exact_klass();
+ if (improved_klass == NULL) {
+ improved_klass = comp->cha_exact_type(callee_signature_k);
+ }
+ if (improved_klass != NULL && exact_klass != improved_klass) {
+ assert(exact_klass == NULL, "obj and signature disagree?");
+ exact_klass = improved_klass;
+ }
+ }
+ do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
}
if (!do_null && !do_update) {
@@ -2640,7 +2667,7 @@
__ leal(LIR_OprFact::address(base_type_address), mdp);
}
}
- LIRItem value(arg, this);
+ LIRItem value(obj, this);
value.load_item();
__ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
@@ -2665,9 +2692,9 @@
if (t == T_OBJECT || t == T_ARRAY) {
intptr_t profiled_k = parameters->type(j);
Local* local = x->state()->local_at(java_index)->as_Local();
- ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
- in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
- profiled_k, local, mdp, false, local->declared_type()->as_klass());
+ ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
+ in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
+ profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
// If the profile is known statically set it once for all and do not emit any code
if (exact != NULL) {
md->set_parameter_type(j, exact);
@@ -3129,19 +3156,28 @@
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
int start = 0;
int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
- if (x->nb_profiled_args() < stop) {
- // if called through method handle invoke, some arguments may have been popped
- stop = x->nb_profiled_args();
+ if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
+ // first argument is not profiled at call (method handle invoke)
+ assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
+ start = 1;
}
- ciSignature* sig = x->callee()->signature();
+ ciSignature* callee_signature = x->callee()->signature();
// method handle call to virtual method
bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
- ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
- for (int i = 0; i < stop; i++) {
+ ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
+
+ bool ignored_will_link;
+ ciSignature* signature_at_call = NULL;
+ x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
+ ciSignatureStream signature_at_call_stream(signature_at_call);
+
+ // if called through method handle invoke, some arguments may have been popped
+ for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
- ciKlass* exact = profile_arg_type(md, base_offset, off,
- args->type(i), x->profiled_arg_at(i+start), mdp,
- !x->arg_needs_null_check(i+start), sig_stream.next_klass());
+ ciKlass* exact = profile_type(md, base_offset, off,
+ args->type(i), x->profiled_arg_at(i+start), mdp,
+ !x->arg_needs_null_check(i+start),
+ signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
if (exact != NULL) {
md->set_argument_type(bci, i, exact);
}
@@ -3176,8 +3212,8 @@
int bci = x->bci_of_invoke();
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
// The first parameter is the receiver so that's what we start
- // with if it exists. On exception if method handle call to
- // virtual method has receiver in the args list
+ // with if it exists. One exception is method handle call to
+ // virtual method: the receiver is in the args list
if (arg == NULL || !Bytecodes::has_receiver(bc)) {
i = 1;
arg = x->profiled_arg_at(0);
@@ -3186,9 +3222,9 @@
int k = 0; // to iterate on the profile data
for (;;) {
intptr_t profiled_k = parameters->type(k);
- ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
- in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
- profiled_k, arg, mdp, not_null, sig_stream.next_klass());
+ ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
+ in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
+ profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
// If the profile is known statically set it once for all and do not emit any code
if (exact != NULL) {
md->set_parameter_type(k, exact);
@@ -3247,9 +3283,16 @@
assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
LIR_Opr mdp = LIR_OprFact::illegalOpr;
- ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
- ret->type(), x->ret(), mdp,
- !x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
+
+ bool ignored_will_link;
+ ciSignature* signature_at_call = NULL;
+ x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
+
+ ciKlass* exact = profile_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
+ ret->type(), x->ret(), mdp,
+ !x->needs_null_check(),
+ signature_at_call->return_type()->as_klass(),
+ x->callee()->signature()->return_type()->as_klass());
if (exact != NULL) {
md->set_return_type(bci, exact);
}
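
The interplay between signature_at_call_k and callee_signature_k above deserves a standalone illustration: at a method handle call, the signature at the call site and the callee's signature can name different types for the same argument, and profile_type() may take an exact klass from either view. The sketch below is not HotSpot code; Klass, exact_klass_of and select_exact are invented stand-ins. It shows the selection rule: sharpen from either signature when one pins down an exact class, and treat two conflicting exact answers as inconsistent type information.

    #include <cassert>
    #include <cstdio>

    // Stand-in for a resolved class; 'exact' means no other runtime type can
    // occur here (e.g., a final class). All names are invented for this sketch.
    struct Klass {
      const char* name;
      bool        is_exact;
    };

    // Mimics ciKlass::exact_klass(): the klass if it is the only possible
    // runtime type, NULL otherwise.
    static Klass* exact_klass_of(Klass* k) {
      return (k != nullptr && k->is_exact) ? k : nullptr;
    }

    // Selection rule used by profile_type(): start from what the compiler
    // knows about the object, then try to sharpen from the signature at the
    // call site, then from the callee's signature. Two different exact
    // answers would mean the type information is inconsistent.
    static Klass* select_exact(Klass* from_obj, Klass* sig_at_call, Klass* callee_sig) {
      Klass* exact = exact_klass_of(from_obj);
      if (Klass* s = exact_klass_of(sig_at_call)) {
        if (exact != s) { assert(exact == nullptr && "obj and signature disagree?"); exact = s; }
      }
      if (callee_sig != sig_at_call) {
        if (Klass* c = exact_klass_of(callee_sig)) {
          if (exact != c) { assert(exact == nullptr && "obj and signature disagree?"); exact = c; }
        }
      }
      return exact;
    }

    int main() {
      Klass object  = {"Object", false};  // too generic to be exact
      Klass a_final = {"A",      true};   // callee declares the sharper type
      Klass* k = select_exact(&object, &object, &a_final);
      printf("exact type: %s\n", k ? k->name : "(unknown)");  // prints "A"
    }
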
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Thu Nov 14 13:38:49 2013 -0800
@@ -434,7 +434,9 @@
void do_ThreadIDIntrinsic(Intrinsic* x);
void do_ClassIDIntrinsic(Intrinsic* x);
#endif
- ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
+ ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
+ Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
+ ciKlass* callee_signature_k);
void profile_arguments(ProfileCall* x);
void profile_parameters(Base* x);
void profile_parameters_at_call(ProfileCall* x);
--- a/hotspot/src/share/vm/c1/c1_globals.hpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp Thu Nov 14 13:38:49 2013 -0800
@@ -341,9 +341,6 @@
diagnostic(bool, C1PatchInvokeDynamic, true, \
"Patch invokedynamic appendix not known at compile time") \
\
- develop(intx, MaxForceInlineLevel, 100, \
- "maximum number of nested @ForceInline calls that are inlined") \
- \
// Read default values for c1 globals
--- a/hotspot/src/share/vm/ci/ciEnv.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -935,7 +935,9 @@
// Prevent SystemDictionary::add_to_hierarchy from running
// and invalidating our dependencies until we install this method.
+ // No safepoints are allowed. Otherwise, class redefinition can occur in between.
MutexLocker ml(Compile_lock);
+ No_Safepoint_Verifier nsv;
// Change in Jvmti state may invalidate compilation.
if (!failing() &&
@@ -1001,16 +1003,6 @@
// Free codeBlobs
code_buffer->free_blob();
- // stress test 6243940 by immediately making the method
- // non-entrant behind the system's back. This has serious
- // side effects on the code cache and is not meant for
- // general stress testing
- if (nm != NULL && StressNonEntrant) {
- MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
- NativeJump::patch_verified_entry(nm->entry_point(), nm->verified_entry_point(),
- SharedRuntime::get_handle_wrong_method_stub());
- }
-
if (nm == NULL) {
// The CodeCache is full. Print out warning and disable compilation.
record_failure("code cache is full");
@@ -1036,11 +1028,11 @@
char *method_name = method->name_and_sig_as_C_string();
tty->print_cr("Replacing method %s", method_name);
}
- if (old != NULL ) {
+ if (old != NULL) {
old->make_not_entrant();
}
}
- if (TraceNMethodInstalls ) {
+ if (TraceNMethodInstalls) {
ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string();
ttyLocker ttyl;
@@ -1051,7 +1043,7 @@
// Allow the code to be executed
method->set_code(method, nm);
} else {
- if (TraceNMethodInstalls ) {
+ if (TraceNMethodInstalls) {
ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string();
ttyLocker ttyl;
@@ -1061,7 +1053,6 @@
entry_bci);
}
method->method_holder()->add_osr_nmethod(nm);
-
}
}
}
--- a/hotspot/src/share/vm/ci/ciMethodData.hpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciMethodData.hpp Thu Nov 14 13:38:49 2013 -0800
@@ -77,7 +77,9 @@
static ciKlass* valid_ciklass(intptr_t k) {
if (!TypeEntries::is_type_none(k) &&
!TypeEntries::is_type_unknown(k)) {
- return (ciKlass*)TypeEntries::klass_part(k);
+ ciKlass* res = (ciKlass*)TypeEntries::klass_part(k);
+ assert(res != NULL, "invalid");
+ return res;
} else {
return NULL;
}
--- a/hotspot/src/share/vm/code/nmethod.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/code/nmethod.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -618,21 +618,18 @@
// record this nmethod as dependent on this klass
InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
}
- }
- NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
- if (PrintAssembly && nm != NULL) {
- Disassembler::decode(nm);
+ NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
+ if (PrintAssembly) {
+ Disassembler::decode(nm);
+ }
}
}
-
- // verify nmethod
- debug_only(if (nm) nm->verify();) // might block
-
+ // Do verification and logging outside CodeCache_lock.
if (nm != NULL) {
+ // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
+ DEBUG_ONLY(nm->verify();)
nm->log_new_nmethod();
}
-
- // done
return nm;
}
@@ -1262,7 +1259,7 @@
set_osr_link(NULL);
//set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
- NMethodSweeper::notify();
+ NMethodSweeper::report_state_change(this);
}
void nmethod::invalidate_osr_method() {
@@ -1296,7 +1293,9 @@
}
}
-// Common functionality for both make_not_entrant and make_zombie
+/**
+ * Common functionality for both make_not_entrant and make_zombie
+ */
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
assert(!is_zombie(), "should not already be a zombie");
@@ -1420,9 +1419,7 @@
tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
}
- // Make sweeper aware that there is a zombie method that needs to be removed
- NMethodSweeper::notify();
-
+ NMethodSweeper::report_state_change(this);
return true;
}
@@ -2395,20 +2392,23 @@
void nmethod::verify_interrupt_point(address call_site) {
- // This code does not work in release mode since
- // owns_lock only is available in debug mode.
- CompiledIC* ic = NULL;
- Thread *cur = Thread::current();
- if (CompiledIC_lock->owner() == cur ||
- ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
- SafepointSynchronize::is_at_safepoint())) {
- ic = CompiledIC_at(this, call_site);
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
- } else {
- MutexLocker ml_verify (CompiledIC_lock);
- ic = CompiledIC_at(this, call_site);
+ // Verify IC only when nmethod installation is finished.
+ bool is_installed = (method()->code() == this) // nmethod is in state 'alive' and installed
+ || !this->is_in_use(); // nmethod is installed, but not in 'alive' state
+ if (is_installed) {
+ Thread *cur = Thread::current();
+ if (CompiledIC_lock->owner() == cur ||
+ ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
+ SafepointSynchronize::is_at_safepoint())) {
+ CompiledIC_at(this, call_site);
+ CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+ } else {
+ MutexLocker ml_verify (CompiledIC_lock);
+ CompiledIC_at(this, call_site);
+ }
}
- PcDesc* pd = pc_desc_at(ic->end_of_call());
+
+ PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
assert(pd != NULL, "PcDesc must exist");
for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset(), pd->should_reexecute(),
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -126,6 +126,7 @@
bool CompileBroker::_initialized = false;
volatile bool CompileBroker::_should_block = false;
+volatile jint CompileBroker::_print_compilation_warning = 0;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
// The installed compiler(s)
@@ -2027,11 +2028,10 @@
#endif
}
-// ------------------------------------------------------------------
-// CompileBroker::handle_full_code_cache
-//
-// The CodeCache is full. Print out warning and disable compilation or
-// try code cache cleaning so compilation can continue later.
+/**
+ * The CodeCache is full. Print out warning and disable compilation
+ * or try code cache cleaning so compilation can continue later.
+ */
void CompileBroker::handle_full_code_cache() {
UseInterpreter = true;
if (UseCompiler || AlwaysCompileLoopMethods ) {
@@ -2048,12 +2048,9 @@
xtty->stamp();
xtty->end_elem();
}
- warning("CodeCache is full. Compiler has been disabled.");
- warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
CodeCache::report_codemem_full();
-
#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
codecache_print(/* detailed= */ true);
@@ -2066,17 +2063,22 @@
// Since code cache is full, immediately stop new compiles
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
-
- // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
- // without having to consider the state in which the current thread is.
- ThreadInVMfromUnknown in_vm;
- NMethodSweeper::possibly_sweep();
}
+ // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
+ // without having to consider the state in which the current thread is.
+ ThreadInVMfromUnknown in_vm;
+ NMethodSweeper::possibly_sweep();
} else {
disable_compilation_forever();
}
+
+ // Print warning only once
+ if (should_print_compiler_warning()) {
+ warning("CodeCache is full. Compiler has been disabled.");
+ warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
+ codecache_print(/* detailed= */ true);
+ }
}
- codecache_print(/* detailed= */ true);
}
// ------------------------------------------------------------------
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp Thu Nov 14 13:38:49 2013 -0800
@@ -315,6 +315,8 @@
static int _sum_nmethod_code_size;
static long _peak_compilation_time;
+ static volatile jint _print_compilation_warning;
+
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level);
@@ -418,7 +420,11 @@
return _should_compile_new_jobs == shutdown_compilaton;
}
static void handle_full_code_cache();
-
+ // Ensures that the warning is printed only once.
+ static bool should_print_compiler_warning() {
+ jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
+ return old == 0;
+ }
// Return total compilation ticks
static jlong total_compilation_ticks() {
return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
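
The cmpxchg above is the classic print-once gate: only the thread that transitions the flag from 0 to 1 gets true, so racing compiler threads emit the code-cache warning a single time. A minimal standalone sketch of the same pattern with std::atomic (names invented here, not HotSpot's Atomic class):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // One-shot gate: compare-and-swap 0 -> 1; only the thread that performs
    // the transition returns true, so the warning is printed exactly once
    // even if many threads hit a full code cache simultaneously.
    static std::atomic<int> print_warning_flag{0};

    static bool should_print_warning() {
      int expected = 0;
      return print_warning_flag.compare_exchange_strong(expected, 1);
    }

    int main() {
      std::vector<std::thread> threads;
      for (int i = 0; i < 8; i++) {
        threads.emplace_back([] {
          if (should_print_warning()) {
            printf("CodeCache is full. Compiler has been disabled.\n");  // printed once
          }
        });
      }
      for (auto& t : threads) t.join();
    }
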
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -2211,6 +2211,10 @@
data = mdo->next_data(data)) {
data->clean_weak_klass_links(is_alive);
}
+ ParametersTypeData* parameters = mdo->parameters_type_data();
+ if (parameters != NULL) {
+ parameters->clean_weak_klass_links(is_alive);
+ }
}
}
}
--- a/hotspot/src/share/vm/oops/methodData.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/oops/methodData.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -275,23 +275,23 @@
}
bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
- return !is_type_none(p) &&
- !((Klass*)klass_part(p))->is_loader_alive(is_alive_cl);
+ Klass* k = (Klass*)klass_part(p);
+ return k != NULL && k->is_loader_alive(is_alive_cl);
}
void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
for (int i = 0; i < _number_of_entries; i++) {
intptr_t p = type(i);
- if (is_loader_alive(is_alive_cl, p)) {
- set_type(i, type_none());
+ if (!is_loader_alive(is_alive_cl, p)) {
+ set_type(i, with_status((Klass*)NULL, p));
}
}
}
void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
intptr_t p = type();
- if (is_loader_alive(is_alive_cl, p)) {
- set_type(type_none());
+ if (!is_loader_alive(is_alive_cl, p)) {
+ set_type(with_status((Klass*)NULL, p));
}
}
--- a/hotspot/src/share/vm/oops/methodData.hpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/oops/methodData.hpp Thu Nov 14 13:38:49 2013 -0800
@@ -690,7 +690,6 @@
// recorded type: cell without bit 0 and 1
static intptr_t klass_part(intptr_t v) {
intptr_t r = v & type_klass_mask;
- assert (r != 0, "invalid");
return r;
}
@@ -698,7 +697,9 @@
static Klass* valid_klass(intptr_t k) {
if (!is_type_none(k) &&
!is_type_unknown(k)) {
- return (Klass*)klass_part(k);
+ Klass* res = (Klass*)klass_part(k);
+ assert(res != NULL, "invalid");
+ return res;
} else {
return NULL;
}
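
For context on klass_part() and with_status(): a TypeEntries cell packs a Klass* and two status bits (null seen, type unknown) into a single word, which is why a cell can legitimately carry status while its pointer part is NULL; the methodData.cpp change above relies on exactly that when a class is unloaded. A minimal sketch of the encoding, with invented names and the pointer-alignment assumption made explicit:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Assumes Klass* is at least 4-byte aligned, so the low two bits are free
    // for status. Bit 0: a null reference was seen; bit 1: the type is
    // unknown (conflicting types were seen). Names invented for this sketch.
    struct Klass { const char* name; };

    const intptr_t null_seen    = 1;
    const intptr_t type_unknown = 2;
    const intptr_t status_bits  = null_seen | type_unknown;
    const intptr_t klass_mask   = ~status_bits;

    static intptr_t klass_part(intptr_t v)  { return v & klass_mask; }
    static intptr_t status_part(intptr_t v) { return v & status_bits; }

    // with_status(): install a (possibly NULL) klass while keeping the status
    // bits of the old cell -- the unloading fix above depends on this.
    static intptr_t with_status(Klass* k, intptr_t old) {
      intptr_t p = (intptr_t)k;
      assert((p & status_bits) == 0 && "Klass* must be aligned");
      return p | status_part(old);
    }

    int main() {
      Klass a = {"A"};
      intptr_t cell = with_status(&a, null_seen);  // klass A, null was seen
      cell = with_status((Klass*)nullptr, cell);   // class unloaded: keep status
      printf("klass=%p status=%ld\n", (void*)klass_part(cell), (long)status_part(cell));
    }
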
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -389,6 +389,10 @@
return false;
}
if (inline_level() > _max_inline_level) {
+ if (callee_method->force_inline() && inline_level() > MaxForceInlineLevel) {
+ set_msg("MaxForceInlineLevel");
+ return false;
+ }
if (!callee_method->force_inline() || !IncrementalInline) {
set_msg("inlining too deep");
return false;
--- a/hotspot/src/share/vm/opto/callGenerator.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -776,7 +776,7 @@
guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove
const int vtable_index = Method::invalid_vtable_index;
CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
- assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
+ assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
if (cg != NULL && cg->is_inline())
return cg;
}
@@ -846,7 +846,7 @@
}
CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
- assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
+ assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
if (cg != NULL && cg->is_inline())
return cg;
}
--- a/hotspot/src/share/vm/opto/loopopts.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/opto/loopopts.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -42,6 +42,13 @@
// so disable this for now
return NULL;
}
+
+ if (n->is_MathExact()) {
+ // MathExact has projections that are not correctly handled in the code
+ // below.
+ return NULL;
+ }
+
int wins = 0;
assert(!n->is_CFG(), "");
assert(region->is_Region(), "");
--- a/hotspot/src/share/vm/opto/matcher.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/opto/matcher.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -464,17 +464,17 @@
C->FIRST_STACK_mask().Clear();
// Add in the incoming argument area
- OptoReg::Name init = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
- for (i = init; i < _in_arg_limit; i = OptoReg::add(i,1))
+ OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
+ for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
C->FIRST_STACK_mask().Insert(i);
-
+ }
// Add in all bits past the outgoing argument area
guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
"must be able to represent all call arguments in reg mask");
- init = _out_arg_limit;
- for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
+ OptoReg::Name init = _out_arg_limit;
+ for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
C->FIRST_STACK_mask().Insert(i);
-
+ }
// Finally, set the "infinite stack" bit.
C->FIRST_STACK_mask().set_AllStack();
@@ -506,16 +506,36 @@
idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
}
if (Matcher::vector_size_supported(T_FLOAT,2)) {
+ // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
+ // RA guarantees such alignment since it is needed for Double and Long values.
*idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
}
if (Matcher::vector_size_supported(T_FLOAT,4)) {
+ // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
+ //
+ // RA can use input arguments stack slots for spills but until RA
+ // we don't know frame size and offset of input arg stack slots.
+ //
+ // Exclude last input arg stack slots to avoid spilling vectors there
+ // otherwise vector spills could stomp over stack slots in caller frame.
+ OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
+ for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
+ aligned_stack_mask.Remove(in);
+ in = OptoReg::add(in, -1);
+ }
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
}
if (Matcher::vector_size_supported(T_FLOAT,8)) {
+ // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
+ OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
+ for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
+ aligned_stack_mask.Remove(in);
+ in = OptoReg::add(in, -1);
+ }
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
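
The loops added above, together with the clear_to_sets() calls, are easier to follow on a toy bitmask. The sketch below is not HotSpot's RegMask; the slot numbering and area bounds are invented. It removes the last SlotsPerVecX-1 incoming-argument slots so a 16-byte vector spilled there can never reach past the argument area into the caller's frame, then keeps only complete aligned 4-slot groups:

    #include <bitset>
    #include <cstdio>

    int main() {
      const int SlotsPerVecX = 4;                // 16-byte vector = 4 stack slots
      const int init_in = 8, in_arg_limit = 14;  // hypothetical incoming-arg area [8, 14)
      std::bitset<32> mask;                      // stand-in for aligned_stack_mask
      for (int i = init_in; i < in_arg_limit; i++) mask.set(i);
      for (int i = 16; i < 32; i++) mask.set(i); // slots past the outgoing-arg area

      // Exclude the last SlotsPerVecX-1 input-arg slots: a vector spilled at
      // one of them would stomp stack slots in the caller's frame.
      int in = in_arg_limit - 1;
      for (int k = 1; in >= init_in && k < SlotsPerVecX; k++, in--) mask.reset(in);

      // clear_to_sets(): drop any aligned group of 4 slots that is not
      // complete, since a VecX spill needs all 4.
      for (int base = 0; base + SlotsPerVecX <= 32; base += SlotsPerVecX) {
        bool complete = true;
        for (int j = 0; j < SlotsPerVecX; j++) complete = complete && mask.test(base + j);
        if (!complete)
          for (int j = 0; j < SlotsPerVecX; j++) mask.reset(base + j);
      }
      printf("VecX-spillable slots: %s\n", mask.to_string().c_str());
    }
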
--- a/hotspot/src/share/vm/opto/mathexactnode.hpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/opto/mathexactnode.hpp Thu Nov 14 13:38:49 2013 -0800
@@ -49,7 +49,7 @@
virtual Node* Identity(PhaseTransform* phase) { return this; }
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) { return NULL; }
virtual const Type* Value(PhaseTransform* phase) const { return bottom_type(); }
- virtual uint hash() const { return Node::hash(); }
+ virtual uint hash() const { return NO_HASH; }
virtual bool is_CFG() const { return false; }
virtual uint ideal_reg() const { return NotAMachineReg; }
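
Why returning NO_HASH matters: GVN shares nodes that hash and compare equal, which is sound only for pure values. A MathExactNode also feeds a control-dependent overflow projection, so two occurrences on different paths must stay distinct (the GVNTest added below exercises exactly this). A toy value-numbering table, with invented node types, showing the opt-out:

    #include <cstdio>
    #include <functional>
    #include <string>
    #include <unordered_map>

    // Toy GVN: nodes are identified by an (op, operands) key; NO_HASH marks
    // nodes that must never be shared. All types here are invented.
    const unsigned NO_HASH = 0;

    struct Node {
      std::string key;      // e.g. "AddI(a,b)" or "AddExactI(a,b)"
      bool        no_hash;  // MathExact-like nodes opt out of sharing
      unsigned hash() const {
        return no_hash ? NO_HASH : ((unsigned)std::hash<std::string>()(key) | 1u);
      }
    };

    static std::unordered_map<std::string, Node*> table;

    static Node* gvn_transform(Node* n) {
      if (n->hash() == NO_HASH) return n;        // keep every copy distinct
      auto it = table.find(n->key);
      if (it != table.end()) return it->second;  // reuse an existing node
      table[n->key] = n;
      return n;
    }

    int main() {
      Node add1{"AddI(a,b)", false},       add2{"AddI(a,b)", false};
      Node exact1{"AddExactI(a,b)", true}, exact2{"AddExactI(a,b)", true};
      printf("AddI commoned:      %s\n", gvn_transform(&add1) == gvn_transform(&add2) ? "yes" : "no");
      printf("AddExactI commoned: %s\n", gvn_transform(&exact1) == gvn_transform(&exact2) ? "yes" : "no");
    }
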
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -1132,9 +1132,6 @@
Tier3InvokeNotifyFreqLog = 0;
Tier4InvocationThreshold = 0;
}
- if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
- FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
- }
}
#if INCLUDE_ALL_GCS
@@ -3648,6 +3645,11 @@
"Incompatible compilation policy selected", NULL);
}
}
+ // Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered)
+ if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
+ FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
+ }
+
// Set heap size based on available physical memory
set_heap_size();
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/runtime/globals.hpp Thu Nov 14 13:38:49 2013 -0800
@@ -2954,6 +2954,9 @@
product(intx, MaxRecursiveInlineLevel, 1, \
"maximum number of nested recursive calls that are inlined") \
\
+ develop(intx, MaxForceInlineLevel, 100, \
+ "maximum number of nested @ForceInline calls that are inlined") \
+ \
product_pd(intx, InlineSmallCode, \
"Only inline already compiled methods if their code size is " \
"less than this") \
@@ -3019,9 +3022,6 @@
notproduct(intx, ZombieALotInterval, 5, \
"Number of exits until ZombieALot kicks in") \
\
- develop(bool, StressNonEntrant, false, \
- "Mark nmethods non-entrant at registration") \
- \
diagnostic(intx, MallocVerifyInterval, 0, \
"If non-zero, verify C heap after every N calls to " \
"malloc/realloc/free") \
@@ -3289,7 +3289,7 @@
"Exit the VM if we fill the code cache") \
\
product(bool, UseCodeCacheFlushing, true, \
- "Attempt to clean the code cache before shutting off compiler") \
+ "Remove cold/old nmethods from the code cache") \
\
/* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \
--- a/hotspot/src/share/vm/runtime/sweeper.cpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp Thu Nov 14 13:38:49 2013 -0800
@@ -112,14 +112,13 @@
if (_records != NULL) {
_records[_sweep_index].traversal = _traversals;
_records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
- _records[_sweep_index].invocation = _invocations;
+ _records[_sweep_index].invocation = _sweep_fractions_left;
_records[_sweep_index].compile_id = nm->compile_id();
_records[_sweep_index].kind = nm->compile_kind();
_records[_sweep_index].state = nm->_state;
_records[_sweep_index].vep = nm->verified_entry_point();
_records[_sweep_index].uep = nm->entry_point();
_records[_sweep_index].line = line;
-
_sweep_index = (_sweep_index + 1) % SweeperLogEntries;
}
}
@@ -127,26 +126,29 @@
#define SWEEP(nm)
#endif
-nmethod* NMethodSweeper::_current = NULL; // Current nmethod
-long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed
-int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
-int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
-int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
-int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
-
-volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we are completed with this pass
-volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
+nmethod* NMethodSweeper::_current = NULL; // Current nmethod
+long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
+long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
+long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
+int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
+int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
+int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
+int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
-jint NMethodSweeper::_locked_seen = 0;
-jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool NMethodSweeper::_request_mark_phase = false;
+volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
+volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
+volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
+volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
+ // 1) alive -> not_entrant
+ // 2) not_entrant -> zombie
+ // 3) zombie -> marked_for_reclamation
-int NMethodSweeper::_total_nof_methods_reclaimed = 0;
-jlong NMethodSweeper::_total_time_sweeping = 0;
-jlong NMethodSweeper::_total_time_this_sweep = 0;
-jlong NMethodSweeper::_peak_sweep_time = 0;
-jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
-int NMethodSweeper::_hotness_counter_reset_val = 0;
+int NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
+jlong NMethodSweeper::_total_time_sweeping = 0; // Accumulated time sweeping
+jlong NMethodSweeper::_total_time_this_sweep = 0; // Total time this sweep
+jlong NMethodSweeper::_peak_sweep_time = 0; // Peak time for a full sweep
+jlong NMethodSweeper::_peak_sweep_fraction_time = 0; // Peak time sweeping one fraction
+int NMethodSweeper::_hotness_counter_reset_val = 0;
class MarkActivationClosure: public CodeBlobClosure {
@@ -197,13 +199,16 @@
return;
}
+ // Increase time so that we can estimate when to invoke the sweeper again.
+ _time_counter++;
+
// Check for restart
assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
- if (!sweep_in_progress() && need_marking_phase()) {
- _seen = 0;
- _invocations = NmethodSweepFraction;
- _current = CodeCache::first_nmethod();
- _traversals += 1;
+ if (!sweep_in_progress()) {
+ _seen = 0;
+ _sweep_fractions_left = NmethodSweepFraction;
+ _current = CodeCache::first_nmethod();
+ _traversals += 1;
_total_time_this_sweep = 0;
if (PrintMethodFlushing) {
@@ -211,10 +216,6 @@
}
Threads::nmethods_do(&mark_activation_closure);
- // reset the flags since we started a scan from the beginning.
- reset_nmethod_marking();
- _locked_seen = 0;
- _not_entrant_seen_on_stack = 0;
} else {
// Only set hotness counter
Threads::nmethods_do(&set_hotness_closure);
@@ -222,14 +223,48 @@
OrderAccess::storestore();
}
-
+/**
+ * This function invokes the sweeper if at least one of the three conditions is met:
+ * (1) The code cache is getting full
+ * (2) There are sufficient state changes in/since the last sweep.
+ * (3) We have not been sweeping for 'some time'
+ */
void NMethodSweeper::possibly_sweep() {
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
if (!MethodFlushing || !sweep_in_progress()) {
return;
}
- if (_invocations > 0) {
+ // If there was no state change during nmethod sweeping, 'should_sweep' will be false.
+ // This is one of the two places where should_sweep can be set to true. The general
+ // idea is as follows: If there is enough free space in the code cache, there is no
+ // need to invoke the sweeper. The following formula (which determines whether to invoke
+ // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
+ // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
+ // the formula considers how much space in the code cache is currently used. Here are
+ // some examples that will (hopefully) help in understanding.
+ //
+ // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
+ //                               the result of the division is 0. This keeps the used
+ //                               code cache size small (important for embedded Java).
+ // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 10% full). The formula
+ //                               computes: (256 / 16) - 1 = 15. As a result, we invoke
+ //                               the sweeper after 15 invocations of 'mark_active_nmethods()'.
+ // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 90% full). The formula
+ //                               computes: (256 / 16) - 10 = 6.
+ if (!_should_sweep) {
+ int time_since_last_sweep = _time_counter - _last_sweep;
+ double wait_until_next_sweep = (ReservedCodeCacheSize / (16 * M)) - time_since_last_sweep -
+ CodeCache::reverse_free_ratio();
+
+ if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
+ _should_sweep = true;
+ }
+ }
+
+ if (_should_sweep && _sweep_fractions_left > 0) {
// Only one thread at a time will sweep
jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
if (old != 0) {
@@ -242,31 +277,46 @@
memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
}
#endif
- if (_invocations > 0) {
+
+ if (_sweep_fractions_left > 0) {
sweep_code_cache();
- _invocations--;
+ _sweep_fractions_left--;
+ }
+
+ // We are done with sweeping the code cache once.
+ if (_sweep_fractions_left == 0) {
+ _last_sweep = _time_counter;
+ // Reset flag; temporarily disables sweeper
+ _should_sweep = false;
+ // If there was enough state change, 'possibly_enable_sweeper()'
+ // sets '_should_sweep' to true
+ possibly_enable_sweeper();
+ // Reset _bytes_changed only if there was enough state change. _bytes_changed
+ // can further increase by calls to 'report_state_change'.
+ if (_should_sweep) {
+ _bytes_changed = 0;
+ }
}
_sweep_started = 0;
}
}
void NMethodSweeper::sweep_code_cache() {
-
jlong sweep_start_counter = os::elapsed_counter();
- _flushed_count = 0;
- _zombified_count = 0;
- _marked_count = 0;
+ _flushed_count = 0;
+ _zombified_count = 0;
+ _marked_for_reclamation_count = 0;
if (PrintMethodFlushing && Verbose) {
- tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
+ tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
}
if (!CompileBroker::should_compile_new_jobs()) {
// If we have turned off compilations we might as well do full sweeps
// in order to reach the clean state faster. Otherwise the sleeping compiler
// threads will slow down sweeping.
- _invocations = 1;
+ _sweep_fractions_left = 1;
}
// We want to visit all nmethods after NmethodSweepFraction
@@ -274,7 +324,7 @@
// remaining number of invocations. This is only an estimate since
// the number of nmethods changes during the sweep so the final
// stage must iterate until it there are no more nmethods.
- int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
+ int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
int swept_count = 0;
@@ -286,11 +336,11 @@
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// The last invocation iterates until there are no more nmethods
- for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+ for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
swept_count++;
if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
if (PrintMethodFlushing && Verbose) {
- tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
+ tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@@ -314,19 +364,7 @@
}
}
- assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
-
- if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
- // we've completed a scan without making progress but there were
- // nmethods we were unable to process either because they were
- // locked or were still on stack. We don't have to aggressively
- // clean them up so just stop scanning. We could scan once more
- // but that complicates the control logic and it's unlikely to
- // matter much.
- if (PrintMethodFlushing) {
- tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
- }
- }
+ assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
jlong sweep_end_counter = os::elapsed_counter();
jlong sweep_time = sweep_end_counter - sweep_start_counter;
@@ -340,21 +378,21 @@
event.set_starttime(sweep_start_counter);
event.set_endtime(sweep_end_counter);
event.set_sweepIndex(_traversals);
- event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
+ event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
event.set_sweptCount(swept_count);
event.set_flushedCount(_flushed_count);
- event.set_markedCount(_marked_count);
+ event.set_markedCount(_marked_for_reclamation_count);
event.set_zombifiedCount(_zombified_count);
event.commit();
}
#ifdef ASSERT
if(PrintMethodFlushing) {
- tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
+ tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time);
}
#endif
- if (_invocations == 1) {
+ if (_sweep_fractions_left == 1) {
_peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
log_sweep("finished");
}
@@ -368,12 +406,37 @@
// it only makes sense to re-enable compilation if we have actually freed memory.
// Note that typically several kB are released for sweeping 16MB of the code
// cache. As a result, 'freed_memory' > 0 to restart the compiler.
- if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
+ if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
log_sweep("restart_compiler");
}
}
+/**
+ * This function updates the sweeper statistics that keep track of nmethods
+ * state changes. If there is 'enough' state change, the sweeper is invoked
+ * as soon as possible. There can be data races on _bytes_changed. The data
+ * races are benign, since it does not matter if we lose a couple of bytes.
+ * In the worst case we call the sweeper a little later. Also, we are guaranteed
+ * to invoke the sweeper if the code cache gets full.
+ */
+void NMethodSweeper::report_state_change(nmethod* nm) {
+ _bytes_changed += nm->total_size();
+ possibly_enable_sweeper();
+}
+
+/**
+ * Function determines if there was 'enough' state change in the code cache to invoke
+ * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
+ * the code cache since the last sweep.
+ */
+void NMethodSweeper::possibly_enable_sweeper() {
+ double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
+ if (percent_changed > 1.0) {
+ _should_sweep = true;
+ }
+}
+
class NMethodMarker: public StackObj {
private:
CompilerThread* _thread;
@@ -424,9 +487,6 @@
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
- } else {
- _locked_seen++;
- SWEEP(nm);
}
return freed_memory;
}
@@ -448,8 +508,9 @@
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
}
nm->mark_for_reclamation();
- request_nmethod_marking();
- _marked_count++;
+ // Keep track of code cache state change
+ _bytes_changed += nm->total_size();
+ _marked_for_reclamation_count++;
SWEEP(nm);
}
} else if (nm->is_not_entrant()) {
@@ -459,18 +520,14 @@
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
+ // Code cache state change is tracked in make_zombie()
nm->make_zombie();
- request_nmethod_marking();
_zombified_count++;
SWEEP(nm);
} else {
// Still alive, clean up its inline caches
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
- // we coudn't transition this nmethod so don't immediately
- // request a rescan. If this method stays on the stack for a
- // long time we don't want to keep rescanning the code cache.
- _not_entrant_seen_on_stack++;
SWEEP(nm);
}
} else if (nm->is_unloaded()) {
@@ -485,8 +542,8 @@
release_nmethod(nm);
_flushed_count++;
} else {
+ // Code cache state change is tracked in make_zombie()
nm->make_zombie();
- request_nmethod_marking();
_zombified_count++;
SWEEP(nm);
}
@@ -514,7 +571,11 @@
// The second condition ensures that methods are not immediately made not-entrant
// after compilation.
nm->make_not_entrant();
- request_nmethod_marking();
+ // Code cache state change is tracked in make_not_entrant()
+ if (PrintMethodFlushing && Verbose) {
+ tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
+ nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
+ }
}
}
}
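
To make the invocation heuristic in possibly_sweep() concrete, the following standalone program evaluates the wait formula for a 256M cache at several occupancy levels. reverse_free_ratio() is approximated here as capacity/free, an assumption, since its real definition lives in CodeCache. The numbers it prints match the worked examples in the comment above (15 time steps at 10% full, 6 at 90%):

    #include <cstdio>

    // Sketch of the sweeper-invocation formula: sweep as soon as
    //   (ReservedCodeCacheSize / 16M) - time_since_last_sweep - reverse_free_ratio <= 0
    // reverse_free_ratio grows as the cache fills (assumed capacity/free
    // here), so a fuller cache shortens the wait.
    int main() {
      const double M = 1024.0 * 1024.0;
      const double reserved = 256 * M;
      const double levels[] = {10.0, 50.0, 90.0};
      for (double used_pct : levels) {
        double free_ratio = (100.0 - used_pct) / 100.0;
        double reverse_free_ratio = 1.0 / free_ratio;  // assumed definition
        for (long t = 0; ; t++) {                      // t = time since last sweep
          double wait = reserved / (16 * M) - t - reverse_free_ratio;
          if (wait <= 0.0) {
            printf("cache %2.0f%% full -> sweep after %ld time steps\n", used_pct, t);
            break;
          }
        }
      }
    }
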
--- a/hotspot/src/share/vm/runtime/sweeper.hpp Thu Nov 14 21:05:16 2013 +0100
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp Thu Nov 14 13:38:49 2013 -0800
@@ -53,22 +53,22 @@
// is full.
class NMethodSweeper : public AllStatic {
- static long _traversals; // Stack scan count, also sweep ID.
- static nmethod* _current; // Current nmethod
- static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache
- static int _flushed_count; // Nof. nmethods flushed in current sweep
- static int _zombified_count; // Nof. nmethods made zombie in current sweep
- static int _marked_count; // Nof. nmethods marked for reclaim in current sweep
+ static long _traversals; // Stack scan count, also sweep ID.
+ static long _time_counter; // Virtual time used to periodically invoke sweeper
+ static long _last_sweep; // Value of _time_counter when the last sweep happened
+ static nmethod* _current; // Current nmethod
+ static int _seen; // Nof. nmethods we have currently processed in current pass of CodeCache
+ static int _flushed_count; // Nof. nmethods flushed in current sweep
+ static int _zombified_count; // Nof. nmethods made zombie in current sweep
+ static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
- static volatile int _invocations; // No. of invocations left until we are completed with this pass
- static volatile int _sweep_started; // Flag to control conc sweeper
-
- //The following are reset in mark_active_nmethods and synchronized by the safepoint
- static bool _request_mark_phase; // Indicates that a change has happend and we need another mark pahse,
- // always checked and reset at a safepoint so memory will be in sync.
- static int _locked_seen; // Number of locked nmethods encountered during the scan
- static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
-
+ static volatile int _sweep_fractions_left; // Nof. invocations left until we are completed with this pass
+ static volatile int _sweep_started; // Flag to control conc sweeper
+ static volatile bool _should_sweep; // Indicates if we should invoke the sweeper
+ static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from:
+ // 1) alive -> not_entrant
+ // 2) not_entrant -> zombie
+ // 3) zombie -> marked_for_reclamation
// Stat counters
static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed
static jlong _total_time_sweeping; // Accumulated time sweeping
@@ -81,9 +81,6 @@
static bool sweep_in_progress();
static void sweep_code_cache();
- static void request_nmethod_marking() { _request_mark_phase = true; }
- static void reset_nmethod_marking() { _request_mark_phase = false; }
- static bool need_marking_phase() { return _request_mark_phase; }
static int _hotness_counter_reset_val;
@@ -109,13 +106,8 @@
static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
static int hotness_counter_reset_val();
-
- static void notify() {
- // Request a new sweep of the code cache from the beginning. No
- // need to synchronize the setting of this flag since it only
- // changes to false at safepoint so we can never overwrite it with false.
- request_nmethod_marking();
- }
+ static void report_state_change(nmethod* nm);
+ static void possibly_enable_sweeper();
};
#endif // SHARE_VM_RUNTIME_SWEEPER_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/mathexact/GVNTest.java Thu Nov 14 13:38:49 2013 -0800
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8028207
+ * @summary Verify that GVN doesn't mess up the two addExacts
+ * @compile GVNTest.java
+ * @run main GVNTest
+ *
+ */
+
+public class GVNTest {
+ public static int result = 0;
+ public static int value = 93;
+ public static void main(String[] args) {
+ for (int i = 0; i < 50000; ++i) {
+ result = runTest(value + i);
+ result = runTest(value + i);
+ result = runTest(value + i);
+ result = runTest(value + i);
+ result = runTest(value + i);
+ }
+ }
+
+ public static int runTest(int value) {
+ int v = value + value;
+ int sum = 0;
+ if (v < 4032) {
+ for (int i = 0; i < 1023; ++i) {
+ sum += Math.addExact(value, value);
+ }
+ } else {
+ for (int i = 0; i < 321; ++i) {
+ sum += Math.addExact(value, value);
+ }
+ }
+ return sum + v;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/mathexact/SplitThruPhiTest.java Thu Nov 14 13:38:49 2013 -0800
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8028198
+ * @summary Verify that split through phi does the right thing
+ * @compile SplitThruPhiTest.java
+ * @run main SplitThruPhiTest
+ *
+ */
+
+public class SplitThruPhiTest {
+ public static volatile int value = 19;
+ public static int store = 0;
+ public static void main(String[] args) {
+ for (int i = 0; i < 150000; ++i) {
+ store = runTest(value);
+ }
+ }
+
+ public static int runTest(int val) {
+ int result = Math.addExact(val, 1);
+ int total = 0;
+ for (int i = val; i < 200; i = Math.addExact(i, 1)) {
+ total += i;
+ }
+ return total;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/profiling/TestUnexpectedProfilingMismatch.java Thu Nov 14 13:38:49 2013 -0800
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027631
+ * @summary profiling of arguments at calls cannot rely on signature of callee for types
+ * @run main/othervm -XX:-BackgroundCompilation -XX:TieredStopAtLevel=3 -XX:TypeProfileLevel=111 -XX:Tier3InvocationThreshold=200 -XX:Tier0InvokeNotifyFreqLog=7 TestUnexpectedProfilingMismatch
+ *
+ */
+
+import java.lang.invoke.*;
+
+public class TestUnexpectedProfilingMismatch {
+
+ static class A {
+ }
+
+ static class B {
+ }
+
+ static void mA(A a) {
+ }
+
+ static void mB(B b) {
+ }
+
+ static final MethodHandle mhA;
+ static final MethodHandle mhB;
+ static {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodType mt = MethodType.methodType(void.class, A.class);
+ MethodHandle res = null;
+ try {
+ res = lookup.findStatic(TestUnexpectedProfilingMismatch.class, "mA", mt);
+ } catch(NoSuchMethodException ex) {
+ } catch(IllegalAccessException ex) {
+ }
+ mhA = res;
+ mt = MethodType.methodType(void.class, B.class);
+ try {
+ res = lookup.findStatic(TestUnexpectedProfilingMismatch.class, "mB", mt);
+ } catch(NoSuchMethodException ex) {
+ } catch(IllegalAccessException ex) {
+ }
+ mhB = res;
+ }
+
+ void m1(A a, boolean doit) throws Throwable {
+ if (doit) {
+ mhA.invoke(a);
+ }
+ }
+
+ void m2(B b) throws Throwable {
+ mhB.invoke(b);
+ }
+
+ static public void main(String[] args) {
+ TestUnexpectedProfilingMismatch tih = new TestUnexpectedProfilingMismatch();
+ A a = new A();
+ B b = new B();
+ try {
+ for (int i = 0; i < 256 - 1; i++) {
+ tih.m1(a, true);
+ }
+ // Will trigger the compilation but will also run once
+ // more interpreted with a non null MDO which it will
+ // update. Make it skip the body of the method.
+ tih.m1(a, false);
+ // Compile this one as well and do the profiling
+ for (int i = 0; i < 256; i++) {
+ tih.m2(b);
+ }
+ // Will run and see a conflict
+ tih.m1(a, true);
+ } catch(Throwable ex) {
+ ex.printStackTrace();
+ }
+ System.out.println("TEST PASSED");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/profiling/unloadingconflict/B.java Thu Nov 14 13:38:49 2013 -0800
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class B {
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/profiling/unloadingconflict/TestProfileConflictClassUnloading.java Thu Nov 14 13:38:49 2013 -0800
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027572
+ * @summary class unloading resets profile, method compiled after the profile is first set and before class loading sets unknown bit with not recorded class
+ * @build B
+ * @run main/othervm -XX:TypeProfileLevel=222 -XX:-BackgroundCompilation TestProfileConflictClassUnloading
+ *
+ */
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.Paths;
+
+public class TestProfileConflictClassUnloading {
+ static class A {
+ }
+
+
+ static void m1(Object o) {
+ }
+
+ static void m2(Object o) {
+ m1(o);
+ }
+
+ static void m3(A a, boolean do_call) {
+ if (!do_call) {
+ return;
+ }
+ m2(a);
+ }
+
+ public static ClassLoader newClassLoader() {
+ try {
+ return new URLClassLoader(new URL[] {
+ Paths.get(System.getProperty("test.classes",".")).toUri().toURL(),
+ }, null);
+ } catch (MalformedURLException e){
+ throw new RuntimeException("Unexpected URL conversion failure", e);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ ClassLoader loader = newClassLoader();
+ Object o = loader.loadClass("B").newInstance();
+ // collect conflicting profiles
+ for (int i = 0; i < 5000; i++) {
+ m2(o);
+ }
+ // prepare for conflict
+ A a = new A();
+ for (int i = 0; i < 5000; i++) {
+ m3(a, false);
+ }
+ // unload class in profile
+ o = null;
+ loader = null;
+ System.gc();
+ // record the conflict
+ m3(a, true);
+ // trigger another GC
+ System.gc();
+ }
+}