8026054: New type profiling points: type of return values at calls
Summary: x86 interpreter and c1 type profiling for return values at calls
Reviewed-by: kvn, twisti
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -79,7 +79,7 @@
// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
-define_pd_global(uintx, TypeProfileLevel, 1);
+define_pd_global(uintx, TypeProfileLevel, 11);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
\
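Note: with this change TypeProfileLevel is read as two decimal digits, the ones digit controlling argument profiling and the tens digit return-value profiling (see profile_arguments_flag() and profile_return_flag() in methodData.cpp further down). A minimal standalone sketch of that decoding, for illustration only:

// Illustration only: mirrors MethodData::profile_arguments_flag() (level % 10)
// and MethodData::profile_return_flag() (level / 10) added by this patch.
#include <cstdio>

static const char* mode_name(unsigned flag) {
  switch (flag) {
    case 0:  return "off";
    case 1:  return "jsr292 only";
    case 2:  return "all methods";
    default: return "invalid";
  }
}

static void decode_type_profile_level(unsigned level) {
  unsigned args_flag = level % 10;  // ones digit: argument type profiling
  unsigned ret_flag  = level / 10;  // tens digit: return value type profiling
  std::printf("TypeProfileLevel=%u -> arguments: %s, return: %s\n",
              level, mode_name(args_flag), mode_name(ret_flag));
}

int main() {
  decode_type_profile_level(11);  // new x86 default: jsr292-only for args and return
  decode_type_profile_level(2);   // argument profiling for all methods, return off
  return 0;
}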
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -1095,7 +1095,7 @@
return;
}
- if (MethodData::profile_arguments()) {
+ if (MethodData::profile_arguments() || MethodData::profile_return()) {
Label profile_continue;
test_method_data_pointer(mdp, profile_continue);
@@ -1105,35 +1105,95 @@
cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
jcc(Assembler::notEqual, profile_continue);
- Label done;
- int off_to_args = in_bytes(TypeStackSlotEntries::args_data_offset());
- addptr(mdp, off_to_args);
+ if (MethodData::profile_arguments()) {
+ Label done;
+ int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+ addptr(mdp, off_to_args);
- for (int i = 0; i < TypeProfileArgsLimit; i++) {
- if (i > 0) {
- movl(tmp, Address(mdp, in_bytes(TypeStackSlotEntries::cell_count_offset())-off_to_args));
- subl(tmp, i*TypeStackSlotEntries::per_arg_count());
- cmpl(tmp, TypeStackSlotEntries::per_arg_count());
- jcc(Assembler::less, done);
+ for (int i = 0; i < TypeProfileArgsLimit; i++) {
+ if (i > 0 || MethodData::profile_return()) {
+ // If return value type is profiled, we may have no argument to profile
+ movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+ subl(tmp, i*TypeStackSlotEntries::per_arg_count());
+ cmpl(tmp, TypeStackSlotEntries::per_arg_count());
+ jcc(Assembler::less, done);
+ }
+ movptr(tmp, Address(callee, Method::const_offset()));
+ load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
+ // stack offset o (zero based) from the start of the argument
+ // list, for n arguments translates into offset n - o - 1 from
+ // the end of the argument list
+ subl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
+ subl(tmp, 1);
+ Address arg_addr = argument_address(tmp);
+ movptr(tmp, arg_addr);
+
+ Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+ profile_obj_type(tmp, mdo_arg_addr);
+
+ int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+ addptr(mdp, to_add);
+ off_to_args += to_add;
}
- movptr(tmp, Address(callee, Method::const_offset()));
- load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
- subl(tmp, Address(mdp, in_bytes(TypeStackSlotEntries::stack_slot_offset(i))-off_to_args));
- subl(tmp, 1);
- Address arg_addr = argument_address(tmp);
- movptr(tmp, arg_addr);
+
+ if (MethodData::profile_return()) {
+ movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+ subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+ }
+
+ bind(done);
- Address mdo_arg_addr(mdp, in_bytes(TypeStackSlotEntries::type_offset(i))-off_to_args);
- profile_obj_type(tmp, mdo_arg_addr);
-
- int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
- addptr(mdp, to_add);
- off_to_args += to_add;
+ if (MethodData::profile_return()) {
+ // We're right after the type profile for the last
+ // argument. tmp is the number of cells left in the
+ // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
+ // if there's a return to profile.
+ assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+ shll(tmp, exact_log2(DataLayout::cell_size));
+ addptr(mdp, tmp);
+ }
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
+ } else {
+ assert(MethodData::profile_return(), "either profile call args or call ret");
+ update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
}
- bind(done);
+ // mdp points right after the end of the
+ // CallTypeData/VirtualCallTypeData, right after the cells for the
+ // return value type if there's one
+
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
+ assert_different_registers(mdp, ret, tmp, rsi);
+ if (ProfileInterpreter && MethodData::profile_return()) {
+ Label profile_continue, done;
+
+ test_method_data_pointer(mdp, profile_continue);
- movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
+ if (MethodData::profile_return_jsr292_only()) {
+ // If we don't profile all invoke bytecodes we must make sure
+ // it's a bytecode we indeed profile. We can't go back to the
+ // beginning of the ProfileData we intend to update to check its
+ // type because we're right after it and we don't know its
+ // length
+ Label do_profile;
+ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
+ jcc(Assembler::equal, do_profile);
+ cmpb(Address(rsi, 0), Bytecodes::_invokehandle);
+ jcc(Assembler::equal, do_profile);
+ get_method(tmp);
+ cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
+ jcc(Assembler::notEqual, profile_continue);
+
+ bind(do_profile);
+ }
+
+ Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
+ mov(tmp, ret);
+ profile_obj_type(tmp, mdo_ret_addr);
bind(profile_continue);
}
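Note: the stack-slot comment in profile_arguments_type() above ("stack offset o ... translates into offset n - o - 1 from the end of the argument list") is easiest to check with a small standalone example; n is what the code loads via ConstMethod::size_of_parameters_offset() and o is the slot recorded in the MDO. Illustration only:

// Illustration of the n - o - 1 translation used by profile_arguments_type():
// a slot counted from the start of the argument list becomes an offset from
// the end, which is how argument_address() indexes the expression stack.
#include <cassert>

static int offset_from_end(int n /* parameter slots */, int o /* slot from start */) {
  return n - o - 1;
}

int main() {
  // A call with 3 parameter slots: slot 0 (the receiver for a virtual call)
  // is farthest from the top of the stack, the last argument is on top.
  assert(offset_from_end(3, 0) == 2);
  assert(offset_from_end(3, 2) == 0);
  return 0;
}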
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -217,6 +217,7 @@
void profile_not_taken_branch(Register mdp);
void profile_obj_type(Register obj, const Address& mdo_addr);
void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
+ void profile_return_type(Register mdp, Register ret, Register tmp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp, Register scratch2,
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -1120,7 +1120,7 @@
return;
}
- if (MethodData::profile_arguments()) {
+ if (MethodData::profile_arguments() || MethodData::profile_return()) {
Label profile_continue;
test_method_data_pointer(mdp, profile_continue);
@@ -1130,35 +1130,92 @@
cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
jcc(Assembler::notEqual, profile_continue);
- Label done;
- int off_to_args = in_bytes(TypeStackSlotEntries::args_data_offset());
- addptr(mdp, off_to_args);
+ if (MethodData::profile_arguments()) {
+ Label done;
+ int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+ addptr(mdp, off_to_args);
- for (int i = 0; i < TypeProfileArgsLimit; i++) {
- if (i > 0) {
- movq(tmp, Address(mdp, in_bytes(TypeStackSlotEntries::cell_count_offset())-off_to_args));
- subl(tmp, i*TypeStackSlotEntries::per_arg_count());
- cmpl(tmp, TypeStackSlotEntries::per_arg_count());
- jcc(Assembler::less, done);
+ for (int i = 0; i < TypeProfileArgsLimit; i++) {
+ if (i > 0 || MethodData::profile_return()) {
+ // If return value type is profiled, we may have no argument to profile
+ movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+ subl(tmp, i*TypeStackSlotEntries::per_arg_count());
+ cmpl(tmp, TypeStackSlotEntries::per_arg_count());
+ jcc(Assembler::less, done);
+ }
+ movptr(tmp, Address(callee, Method::const_offset()));
+ load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
+ subq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
+ subl(tmp, 1);
+ Address arg_addr = argument_address(tmp);
+ movptr(tmp, arg_addr);
+
+ Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+ profile_obj_type(tmp, mdo_arg_addr);
+
+ int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+ addptr(mdp, to_add);
+ off_to_args += to_add;
}
- movptr(tmp, Address(callee, Method::const_offset()));
- load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
- subq(tmp, Address(mdp, in_bytes(TypeStackSlotEntries::stack_slot_offset(i))-off_to_args));
- subl(tmp, 1);
- Address arg_addr = argument_address(tmp);
- movptr(tmp, arg_addr);
+
+ if (MethodData::profile_return()) {
+ movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+ subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+ }
+
+ bind(done);
- Address mdo_arg_addr(mdp, in_bytes(TypeStackSlotEntries::type_offset(i))-off_to_args);
- profile_obj_type(tmp, mdo_arg_addr);
-
- int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
- addptr(mdp, to_add);
- off_to_args += to_add;
+ if (MethodData::profile_return()) {
+ // We're right after the type profile for the last
+ // argument. tmp is the number of cells left in the
+ // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
+ // if there's a return to profile.
+ assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+ shll(tmp, exact_log2(DataLayout::cell_size));
+ addptr(mdp, tmp);
+ }
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
+ } else {
+ assert(MethodData::profile_return(), "either profile call args or call ret");
+ update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
}
- bind(done);
+ // mdp points right after the end of the
+ // CallTypeData/VirtualCallTypeData, right after the cells for the
+ // return value type if there's one
+
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
+ assert_different_registers(mdp, ret, tmp, r13);
+ if (ProfileInterpreter && MethodData::profile_return()) {
+ Label profile_continue, done;
+
+ test_method_data_pointer(mdp, profile_continue);
- movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
+ if (MethodData::profile_return_jsr292_only()) {
+ // If we don't profile all invoke bytecodes we must make sure
+ // it's a bytecode we indeed profile. We can't go back to the
+ // beginning of the ProfileData we intend to update to check its
+ // type because we're right after it and we don't know its
+ // length
+ Label do_profile;
+ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
+ jcc(Assembler::equal, do_profile);
+ cmpb(Address(r13, 0), Bytecodes::_invokehandle);
+ jcc(Assembler::equal, do_profile);
+ get_method(tmp);
+ cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
+ jcc(Assembler::notEqual, profile_continue);
+
+ bind(do_profile);
+ }
+
+ Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
+ mov(tmp, ret);
+ profile_obj_type(tmp, mdo_ret_addr);
bind(profile_continue);
}
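Note: when only JSR 292 call sites are profiled, profile_return_type() above guards the update by checking the bytecode at the bcp (rsi on x86_32, r13 on x86_64) and the current method's intrinsic id. Roughly the following check, written as a standalone sketch with stand-in enums rather than the real Bytecodes/vmIntrinsics constants:

// Sketch of the guard profile_return_type() implements in assembly when
// MethodData::profile_return_jsr292_only() is true: only invokedynamic and
// invokehandle sites, or returns into a compiled LambdaForm, are profiled.
enum Bc { bc_invokedynamic, bc_invokehandle, bc_other };
enum Intrinsic { intr_none, intr_compiledLambdaForm };

static bool should_profile_return_here(Bc current_bc, Intrinsic method_intrinsic) {
  if (current_bc == bc_invokedynamic) return true;   // cmpb(Address(bcp, 0), _invokedynamic)
  if (current_bc == bc_invokehandle)  return true;   // cmpb(Address(bcp, 0), _invokehandle)
  return method_intrinsic == intr_compiledLambdaForm;  // intrinsic_id check on the method
}

int main() {
  return should_profile_return_here(bc_other, intr_none) ? 1 : 0;
}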
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -226,6 +226,7 @@
void profile_not_taken_branch(Register mdp);
void profile_obj_type(Register obj, const Address& mdo_addr);
void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
+ void profile_return_type(Register mdp, Register ret, Register tmp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -194,6 +194,12 @@
__ restore_bcp();
__ restore_locals();
+ if (incoming_state == atos) {
+ Register mdp = rbx;
+ Register tmp = rcx;
+ __ profile_return_type(mdp, rax, tmp);
+ }
+
Label L_got_cache, L_giant_index;
if (EnableInvokeDynamic) {
__ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -177,6 +177,12 @@
__ restore_bcp();
__ restore_locals();
+ if (state == atos) {
+ Register mdp = rbx;
+ Register tmp = rcx;
+ __ profile_return_type(mdp, rax, tmp);
+ }
+
Label L_got_cache, L_giant_index;
if (EnableInvokeDynamic) {
__ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -935,6 +935,7 @@
void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {}
void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
+void Canonicalizer::do_ProfileReturnType(ProfileReturnType* x) {}
void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -104,6 +104,7 @@
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
virtual void do_ProfileCall (ProfileCall* x);
+ virtual void do_ProfileReturnType (ProfileReturnType* x);
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -1466,9 +1466,22 @@
// State at end of inlined method is the state of the caller
// without the method parameters on stack, including the
// return value, if any, of the inlined method on operand stack.
+ int invoke_bci = state()->caller_state()->bci();
set_state(state()->caller_state()->copy_for_parsing());
if (x != NULL) {
state()->push(x->type(), x);
+ if (profile_calls() && MethodData::profile_return() && x->type()->is_object_kind()) {
+ ciMethod* caller = state()->scope()->method();
+ ciMethodData* md = caller->method_data_or_null();
+ ciProfileData* data = md->bci_to_data(invoke_bci);
+ if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+ bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
+ // May not be true in case of an inlined call through a method handle intrinsic.
+ if (has_return) {
+ profile_return_type(x, method(), caller, invoke_bci);
+ }
+ }
+ }
}
Goto* goto_callee = new Goto(continuation(), false);
@@ -2008,6 +2021,9 @@
push(result_type, result);
}
}
+ if (profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
+ profile_return_type(result, target);
+ }
}
@@ -3556,6 +3572,10 @@
Value value = append_split(result);
if (result_type != voidType) push(result_type, value);
+ if (callee != method() && profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
+ profile_return_type(result, callee);
+ }
+
// done
return true;
}
@@ -4312,6 +4332,21 @@
append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
}
+void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
+ assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
+ if (m == NULL) {
+ m = method();
+ }
+ if (invoke_bci < 0) {
+ invoke_bci = bci();
+ }
+ ciMethodData* md = m->method_data_or_null();
+ ciProfileData* data = md->bci_to_data(invoke_bci);
+ if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+ append(new ProfileReturnType(m , invoke_bci, callee, ret));
+ }
+}
+
void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
append(new ProfileInvoke(callee, state));
}
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -375,6 +375,7 @@
void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);
void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
+ void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
void profile_invocation(ciMethod* inlinee, ValueStack* state);
// Shortcuts to profiling control.
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -107,6 +107,7 @@
class UnsafePrefetchRead;
class UnsafePrefetchWrite;
class ProfileCall;
+class ProfileReturnType;
class ProfileInvoke;
class RuntimeCall;
class MemBar;
@@ -211,6 +212,7 @@
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x) = 0;
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
virtual void do_ProfileCall (ProfileCall* x) = 0;
+ virtual void do_ProfileReturnType (ProfileReturnType* x) = 0;
virtual void do_ProfileInvoke (ProfileInvoke* x) = 0;
virtual void do_RuntimeCall (RuntimeCall* x) = 0;
virtual void do_MemBar (MemBar* x) = 0;
@@ -2518,6 +2520,38 @@
}
};
+LEAF(ProfileReturnType, Instruction)
+ private:
+ ciMethod* _method;
+ ciMethod* _callee;
+ int _bci_of_invoke;
+ Value _ret;
+
+ public:
+ ProfileReturnType(ciMethod* method, int bci, ciMethod* callee, Value ret)
+ : Instruction(voidType)
+ , _method(method)
+ , _callee(callee)
+ , _bci_of_invoke(bci)
+ , _ret(ret)
+ {
+ set_needs_null_check(true);
+ // The ProfileReturnType has side-effects and must occur precisely where located
+ pin();
+ }
+
+ ciMethod* method() const { return _method; }
+ ciMethod* callee() const { return _callee; }
+ int bci_of_invoke() const { return _bci_of_invoke; }
+ Value ret() const { return _ret; }
+
+ virtual void input_values_do(ValueVisitor* f) {
+ if (_ret != NULL) {
+ f->visit(&_ret);
+ }
+ }
+};
+
// Call some C runtime function that doesn't safepoint,
// optionally passing the current thread as the first argument.
LEAF(RuntimeCall, Instruction)
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -904,6 +904,12 @@
output()->put(')');
}
+void InstructionPrinter::do_ProfileReturnType(ProfileReturnType* x) {
+ output()->print("profile ret type ");
+ print_value(x->ret());
+ output()->print(" %s.%s", x->method()->holder()->name()->as_utf8(), x->method()->name()->as_utf8());
+ output()->put(')');
+}
void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
output()->print("profile_invoke ");
output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -132,6 +132,7 @@
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
virtual void do_ProfileCall (ProfileCall* x);
+ virtual void do_ProfileReturnType (ProfileReturnType* x);
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -3089,7 +3089,7 @@
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
int start = 0;
- int stop = args->number_of_arguments();
+ int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
if (x->nb_profiled_args() < stop) {
// if called through method handle invoke, some arguments may have been popped
stop = x->nb_profiled_args();
@@ -3099,7 +3099,7 @@
bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
for (int i = 0; i < stop; i++) {
- int off = in_bytes(TypeStackSlotEntries::type_offset(i)) - in_bytes(TypeStackSlotEntries::args_data_offset());
+ int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
ciKlass* exact = profile_arg_type(md, base_offset, off,
args->type(i), x->profiled_arg_at(i+start), mdp,
!x->arg_needs_null_check(i+start), sig_stream.next_klass());
@@ -3131,6 +3131,21 @@
__ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}
+void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
+ int bci = x->bci_of_invoke();
+ ciMethodData* md = x->method()->method_data_or_null();
+ ciProfileData* data = md->bci_to_data(bci);
+ assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
+ ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
+ LIR_Opr mdp = LIR_OprFact::illegalOpr;
+ ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
+ ret->type(), x->ret(), mdp,
+ !x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
+ if (exact != NULL) {
+ md->set_return_type(bci, exact);
+ }
+}
+
void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
// We can safely ignore accessors here, since c2 will inline them anyway,
// accessors are also always mature.
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -536,6 +536,7 @@
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
virtual void do_ProfileCall (ProfileCall* x);
+ virtual void do_ProfileReturnType (ProfileReturnType* x);
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
--- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -531,6 +531,7 @@
void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
void do_ProfileCall (ProfileCall* x);
+ void do_ProfileReturnType (ProfileReturnType* x);
void do_ProfileInvoke (ProfileInvoke* x);
void do_RuntimeCall (RuntimeCall* x);
void do_MemBar (MemBar* x);
@@ -658,6 +659,7 @@
void handle_ExceptionObject (ExceptionObject* x);
void handle_Phi (Phi* x);
void handle_ProfileCall (ProfileCall* x);
+ void handle_ProfileReturnType (ProfileReturnType* x);
};
@@ -718,6 +720,7 @@
void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check();
nce()->handle_ProfileCall(x); }
+void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); }
void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {}
void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {}
void NullCheckVisitor::do_MemBar (MemBar* x) {}
@@ -1142,6 +1145,10 @@
}
}
+void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) {
+ x->set_needs_null_check(!set_contains(x->ret()));
+}
+
void Optimizer::eliminate_null_checks() {
ResourceMark rm;
--- a/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -162,6 +162,7 @@
void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ };
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
void do_ProfileCall (ProfileCall* x) { /* nothing to do */ };
+ void do_ProfileReturnType (ProfileReturnType* x) { /* nothing to do */ };
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
void do_MemBar (MemBar* x) { /* nothing to do */ };
--- a/hotspot/src/share/vm/c1/c1_ValueMap.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueMap.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -203,6 +203,7 @@
void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ }
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
void do_ProfileCall (ProfileCall* x) { /* nothing to do */ }
+ void do_ProfileReturnType (ProfileReturnType* x) { /* nothing to do */ }
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
void do_MemBar (MemBar* x) { /* nothing to do */ };
--- a/hotspot/src/share/vm/ci/ciMethodData.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -137,12 +137,17 @@
void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) {
- for (int i = 0; i < number_of_arguments(); i++) {
+ for (int i = 0; i < _number_of_entries; i++) {
intptr_t k = entries->type(i);
TypeStackSlotEntries::set_type(i, translate_klass(k));
}
}
+void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
+ intptr_t k = ret->type();
+ set_type(translate_klass(k));
+}
+
// Get the data at an arbitrary (sort of) data index.
ciProfileData* ciMethodData::data_at(int data_index) {
if (out_of_bounds(data_index)) {
@@ -313,6 +318,20 @@
}
}
+void ciMethodData::set_return_type(int bci, ciKlass* k) {
+ VM_ENTRY_MARK;
+ MethodData* mdo = get_MethodData();
+ if (mdo != NULL) {
+ ProfileData* data = mdo->bci_to_data(bci);
+ if (data->is_CallTypeData()) {
+ data->as_CallTypeData()->set_return_type(k->get_Klass());
+ } else {
+ assert(data->is_VirtualCallTypeData(), "no arguments!");
+ data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
+ }
+ }
+}
+
bool ciMethodData::has_escape_info() {
return eflag_set(MethodData::estimated);
}
@@ -517,9 +536,7 @@
}
void ciTypeStackSlotEntries::print_data_on(outputStream* st) const {
- _pd->tab(st, true);
- st->print("argument types");
- for (int i = 0; i < number_of_arguments(); i++) {
+ for (int i = 0; i < _number_of_entries; i++) {
_pd->tab(st);
st->print("%d: stack (%u) ", i, stack_slot(i));
print_ciklass(st, type(i));
@@ -527,9 +544,25 @@
}
}
+void ciReturnTypeEntry::print_data_on(outputStream* st) const {
+ _pd->tab(st);
+ st->print("ret ");
+ print_ciklass(st, type());
+ st->cr();
+}
+
void ciCallTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ciCallTypeData");
- args()->print_data_on(st);
+ if (has_arguments()) {
+ tab(st, true);
+ st->print("argument types");
+ args()->print_data_on(st);
+ }
+ if (has_return()) {
+ tab(st, true);
+ st->print("return type");
+ ret()->print_data_on(st);
+ }
}
void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const {
@@ -561,6 +594,15 @@
void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ciVirtualCallTypeData");
rtd_super()->print_receiver_data_on(st);
- args()->print_data_on(st);
+ if (has_arguments()) {
+ tab(st, true);
+ st->print("argument types");
+ args()->print_data_on(st);
+ }
+ if (has_return()) {
+ tab(st, true);
+ st->print("return type");
+ ret()->print_data_on(st);
+ }
}
#endif
--- a/hotspot/src/share/vm/ci/ciMethodData.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -104,20 +104,55 @@
#endif
};
+class ciReturnTypeEntry : public ReturnTypeEntry, ciTypeEntries {
+public:
+ void translate_type_data_from(const ReturnTypeEntry* ret);
+
+ ciKlass* valid_type() const {
+ return valid_ciklass(type());
+ }
+
+#ifndef PRODUCT
+ void print_data_on(outputStream* st) const;
+#endif
+};
+
class ciCallTypeData : public CallTypeData {
public:
ciCallTypeData(DataLayout* layout) : CallTypeData(layout) {}
ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); }
+ ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); }
- virtual void translate_from(const ProfileData* data) {
- args()->translate_type_data_from(data->as_CallTypeData()->args());
+ void translate_type_data_from(const ProfileData* data) {
+ if (has_arguments()) {
+ args()->translate_type_data_from(data->as_CallTypeData()->args());
+ }
+ if (has_return()) {
+ ret()->translate_type_data_from(data->as_CallTypeData()->ret());
+ }
+ }
+
+ intptr_t argument_type(int i) const {
+ assert(has_arguments(), "no arg type profiling data");
+ return args()->type(i);
}
ciKlass* valid_argument_type(int i) const {
+ assert(has_arguments(), "no arg type profiling data");
return args()->valid_type(i);
}
+ intptr_t return_type() const {
+ assert(has_return(), "no ret type profiling data");
+ return ret()->type();
+ }
+
+ ciKlass* valid_return_type() const {
+ assert(has_return(), "no ret type profiling data");
+ return ret()->valid_type();
+ }
+
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
@@ -179,12 +214,9 @@
private:
// Fake multiple inheritance... It's a ciReceiverTypeData also.
ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
-
public:
ciVirtualCallTypeData(DataLayout* layout) : VirtualCallTypeData(layout) {}
- ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); }
-
void set_receiver(uint row, ciKlass* recv) {
rtd_super()->set_receiver(row, recv);
}
@@ -193,16 +225,40 @@
return rtd_super()->receiver(row);
}
+ ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); }
+ ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)VirtualCallTypeData::ret(); }
+
// Copy & translate from oop based VirtualCallData
virtual void translate_from(const ProfileData* data) {
rtd_super()->translate_receiver_data_from(data);
- args()->translate_type_data_from(data->as_VirtualCallTypeData()->args());
+ if (has_arguments()) {
+ args()->translate_type_data_from(data->as_VirtualCallTypeData()->args());
+ }
+ if (has_return()) {
+ ret()->translate_type_data_from(data->as_VirtualCallTypeData()->ret());
+ }
+ }
+
+ intptr_t argument_type(int i) const {
+ assert(has_arguments(), "no arg type profiling data");
+ return args()->type(i);
}
ciKlass* valid_argument_type(int i) const {
+ assert(has_arguments(), "no arg type profiling data");
return args()->valid_type(i);
}
+ intptr_t return_type() const {
+ assert(has_return(), "no ret type profiling data");
+ return ret()->type();
+ }
+
+ ciKlass* valid_return_type() const {
+ assert(has_return(), "no ret type profiling data");
+ return ret()->valid_type();
+ }
+
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
@@ -347,6 +403,7 @@
// If the compiler finds a profiled type that is known statically
// for sure, set it in the MethodData
void set_argument_type(int bci, int i, ciKlass* k);
+ void set_return_type(int bci, ciKlass* k);
void load_data();
--- a/hotspot/src/share/vm/oops/methodData.cpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/oops/methodData.cpp Sat Oct 12 12:12:59 2013 +0200
@@ -156,16 +156,31 @@
}
#endif // !PRODUCT
-int TypeStackSlotEntries::compute_cell_count(BytecodeStream* stream) {
- int max = TypeProfileArgsLimit;
- assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
- Bytecode_invoke inv(stream->method(), stream->bci());
+int TypeStackSlotEntries::compute_cell_count(Symbol* signature, int max) {
+ ResourceMark rm;
+ SignatureStream ss(signature);
+ int args_count = MIN2(ss.reference_parameter_count(), max);
+ return args_count * per_arg_cell_count;
+}
- ResourceMark rm;
- SignatureStream ss(inv.signature());
- int args_count = MIN2(ss.reference_parameter_count(), max);
+int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
+ assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
+ assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
+ Bytecode_invoke inv(stream->method(), stream->bci());
+ int args_cell = 0;
+ if (arguments_profiling_enabled()) {
+ args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), TypeProfileArgsLimit);
+ }
+ int ret_cell = 0;
+ if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
+ ret_cell = ReturnTypeEntry::static_cell_count();
+ }
+ int header_cell = 0;
+ if (args_cell + ret_cell > 0) {
+ header_cell = header_cell_count();
+ }
- return args_count * per_arg_cell_count + (args_count > 0 ? header_cell_count() : 0);
+ return header_cell + args_cell + ret_cell;
}
class ArgumentOffsetComputer : public SignatureInfo {
@@ -197,26 +212,55 @@
int off_at(int i) const { return _offsets.at(i); }
};
-void TypeStackSlotEntries::post_initialize(BytecodeStream* stream) {
+void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver) {
ResourceMark rm;
+ ArgumentOffsetComputer aos(signature, _number_of_entries);
+ aos.total();
+ for (int i = 0; i < _number_of_entries; i++) {
+ set_stack_slot(i, aos.off_at(i) + (has_receiver ? 1 : 0));
+ set_type(i, type_none());
+ }
+}
+void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
Bytecode_invoke inv(stream->method(), stream->bci());
+ SignatureStream ss(inv.signature());
+ if (has_arguments()) {
#ifdef ASSERT
- SignatureStream ss(inv.signature());
- int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
- assert(count > 0, "room for args type but none found?");
- check_number_of_arguments(count);
+ ResourceMark rm;
+ int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
+ assert(count > 0, "room for args type but none found?");
+ check_number_of_arguments(count);
#endif
+ _args.post_initialize(inv.signature(), inv.has_receiver());
+ }
+
+ if (has_return()) {
+ assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
+ _ret.post_initialize();
+ }
+}
- int start = 0;
- ArgumentOffsetComputer aos(inv.signature(), number_of_arguments()-start);
- aos.total();
- bool has_receiver = inv.has_receiver();
- for (int i = start; i < number_of_arguments(); i++) {
- set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
- set_type(i, type_none());
+void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
+ assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
+ Bytecode_invoke inv(stream->method(), stream->bci());
+
+ if (has_arguments()) {
+#ifdef ASSERT
+ ResourceMark rm;
+ SignatureStream ss(inv.signature());
+ int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
+ assert(count > 0, "room for args type but none found?");
+ check_number_of_arguments(count);
+#endif
+ _args.post_initialize(inv.signature(), inv.has_receiver());
+ }
+
+ if (has_return()) {
+ assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
+ _ret.post_initialize();
}
}
@@ -226,7 +270,7 @@
}
void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
- for (int i = 0; i < number_of_arguments(); i++) {
+ for (int i = 0; i < _number_of_entries; i++) {
intptr_t p = type(i);
if (is_loader_alive(is_alive_cl, p)) {
set_type(i, type_none());
@@ -234,7 +278,18 @@
}
}
-bool TypeStackSlotEntries::arguments_profiling_enabled() {
+void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
+ intptr_t p = type();
+ if (is_loader_alive(is_alive_cl, p)) {
+ set_type(type_none());
+ }
+}
+
+bool TypeEntriesAtCall::return_profiling_enabled() {
+ return MethodData::profile_return();
+}
+
+bool TypeEntriesAtCall::arguments_profiling_enabled() {
return MethodData::profile_arguments();
}
@@ -253,9 +308,7 @@
}
void TypeStackSlotEntries::print_data_on(outputStream* st) const {
- _pd->tab(st, true);
- st->print("argument types");
- for (int i = 0; i < number_of_arguments(); i++) {
+ for (int i = 0; i < _number_of_entries; i++) {
_pd->tab(st);
st->print("%d: stack(%u) ", i, stack_slot(i));
print_klass(st, type(i));
@@ -263,14 +316,38 @@
}
}
+void ReturnTypeEntry::print_data_on(outputStream* st) const {
+ _pd->tab(st);
+ print_klass(st, type());
+ st->cr();
+}
+
void CallTypeData::print_data_on(outputStream* st) const {
CounterData::print_data_on(st);
- _args.print_data_on(st);
+ if (has_arguments()) {
+ tab(st, true);
+ st->print("argument types");
+ _args.print_data_on(st);
+ }
+ if (has_return()) {
+ tab(st, true);
+ st->print("return type");
+ _ret.print_data_on(st);
+ }
}
void VirtualCallTypeData::print_data_on(outputStream* st) const {
VirtualCallData::print_data_on(st);
- _args.print_data_on(st);
+ if (has_arguments()) {
+ tab(st, true);
+ st->print("argument types");
+ _args.print_data_on(st);
+ }
+ if (has_return()) {
+ tab(st, true);
+ st->print("return type");
+ _ret.print_data_on(st);
+ }
}
#endif
@@ -530,7 +607,7 @@
}
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
- if (MethodData::profile_arguments()) {
+ if (MethodData::profile_arguments() || MethodData::profile_return()) {
return variable_cell_count;
} else {
return CounterData::static_cell_count();
@@ -542,13 +619,13 @@
return JumpData::static_cell_count();
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
- if (MethodData::profile_arguments()) {
+ if (MethodData::profile_arguments() || MethodData::profile_return()) {
return variable_cell_count;
} else {
return VirtualCallData::static_cell_count();
}
case Bytecodes::_invokedynamic:
- if (MethodData::profile_arguments()) {
+ if (MethodData::profile_arguments() || MethodData::profile_return()) {
return variable_cell_count;
} else {
return CounterData::static_cell_count();
@@ -596,8 +673,9 @@
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
- assert(MethodData::profile_arguments(), "should be collecting args profile");
- if (profile_arguments_for_invoke(stream->method(), stream->bci())) {
+ assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
+ if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+ profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = CallTypeData::compute_cell_count(stream);
} else {
cell_count = CounterData::static_cell_count();
@@ -605,8 +683,9 @@
break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface: {
- assert(MethodData::profile_arguments(), "should be collecting args profile");
- if (profile_arguments_for_invoke(stream->method(), stream->bci())) {
+ assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
+ if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+ profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = VirtualCallTypeData::compute_cell_count(stream);
} else {
cell_count = VirtualCallData::static_cell_count();
@@ -699,7 +778,8 @@
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic: {
int counter_data_cell_count = CounterData::static_cell_count();
- if (profile_arguments_for_invoke(stream->method(), stream->bci())) {
+ if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+ profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = CallTypeData::compute_cell_count(stream);
} else {
cell_count = counter_data_cell_count;
@@ -721,7 +801,8 @@
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface: {
int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
- if (profile_arguments_for_invoke(stream->method(), stream->bci())) {
+ if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+ profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = VirtualCallTypeData::compute_cell_count(stream);
} else {
cell_count = virtual_call_data_cell_count;
@@ -736,7 +817,8 @@
case Bytecodes::_invokedynamic: {
// %%% should make a type profile for any invokedynamic that takes a ref argument
int counter_data_cell_count = CounterData::static_cell_count();
- if (profile_arguments_for_invoke(stream->method(), stream->bci())) {
+ if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+ profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = CallTypeData::compute_cell_count(stream);
} else {
cell_count = counter_data_cell_count;
@@ -778,7 +860,7 @@
break;
}
assert(tag == DataLayout::multi_branch_data_tag ||
- (MethodData::profile_arguments() &&
+ ((MethodData::profile_arguments() || MethodData::profile_return()) &&
(tag == DataLayout::call_type_data_tag ||
tag == DataLayout::counter_data_tag ||
tag == DataLayout::virtual_call_type_data_tag ||
@@ -1111,7 +1193,7 @@
}
int MethodData::profile_arguments_flag() {
- return TypeProfileLevel;
+ return TypeProfileLevel % 10;
}
bool MethodData::profile_arguments() {
@@ -1139,3 +1221,31 @@
return profile_jsr292(m, bci);
}
+int MethodData::profile_return_flag() {
+ return TypeProfileLevel / 10;
+}
+
+bool MethodData::profile_return() {
+ return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
+}
+
+bool MethodData::profile_return_jsr292_only() {
+ return profile_return_flag() == type_profile_jsr292;
+}
+
+bool MethodData::profile_all_return() {
+ return profile_return_flag() == type_profile_all;
+}
+
+bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
+ if (!profile_return()) {
+ return false;
+ }
+
+ if (profile_all_return()) {
+ return true;
+ }
+
+ assert(profile_return_jsr292_only(), "inconsistent");
+ return profile_jsr292(m, bci);
+}
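Note: a worked example for TypeEntriesAtCall::compute_cell_count() above: with 2 cells per profiled argument (stack slot + type) and 1 cell for a return entry, a call with two reference arguments and a reference return needs 1 header + 4 + 1 = 6 extra cells. A standalone sketch of that sum, with the constants hard-coded for illustration:

// Standalone sketch of TypeEntriesAtCall::compute_cell_count(): header cell,
// two cells per profiled reference argument, one cell for a reference return.
// The real code derives ref_args from the signature and caps it at
// TypeProfileArgsLimit; values here are hard-coded for illustration.
#include <algorithm>
#include <cstdio>

static int compute_cell_count(int ref_args, bool returns_ref,
                              bool profile_args, bool profile_ret,
                              int args_limit = 2 /* TypeProfileArgsLimit default */) {
  const int per_arg_cells = 2;     // stack_slot + type
  const int ret_cells_static = 1;  // ReturnTypeEntry::static_cell_count()
  int args_cell = profile_args ? std::min(ref_args, args_limit) * per_arg_cells : 0;
  int ret_cell  = (profile_ret && returns_ref) ? ret_cells_static : 0;
  int header_cell = (args_cell + ret_cell > 0) ? 1 : 0;  // cell count header
  return header_cell + args_cell + ret_cell;
}

int main() {
  std::printf("%d\n", compute_cell_count(2, true,  true,  true));  // 6
  std::printf("%d\n", compute_cell_count(0, true,  true,  true));  // 2: header + ret only
  std::printf("%d\n", compute_cell_count(1, false, true,  false)); // 3: header + one arg
  return 0;
}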
--- a/hotspot/src/share/vm/oops/methodData.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/oops/methodData.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -271,6 +271,7 @@
// data in a structured way.
class ProfileData : public ResourceObj {
friend class TypeEntries;
+ friend class ReturnTypeEntry;
friend class TypeStackSlotEntries;
private:
#ifndef PRODUCT
@@ -748,119 +749,60 @@
per_arg_cell_count
};
- // Start with a header if needed. It stores the number of cells used
- // for this call type information. Unless we collect only profiling
- // for a single argument the number of cells is unknown statically.
- static int header_cell_count() {
- return (TypeProfileArgsLimit > 1) ? 1 : 0;
- }
-
- static int cell_count_local_offset() {
- assert(arguments_profiling_enabled() && TypeProfileArgsLimit > 1, "no cell count");
- return 0;
- }
-
- int cell_count_global_offset() const {
- return _base_off + cell_count_local_offset();
- }
-
// offset of cell for stack slot for entry i within ProfileData object
- int stack_slot_global_offset(int i) const {
+ int stack_slot_offset(int i) const {
return _base_off + stack_slot_local_offset(i);
}
- void check_number_of_arguments(int total) {
- assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
- }
-
- // number of cells not counting the header
- int cell_count_no_header() const {
- return _pd->uint_at(cell_count_global_offset());
- }
-
- static bool arguments_profiling_enabled();
- static void assert_arguments_profiling_enabled() {
- assert(arguments_profiling_enabled(), "args profiling should be on");
- }
-
protected:
+ const int _number_of_entries;
// offset of cell for type for entry i within ProfileData object
- int type_global_offset(int i) const {
+ int type_offset(int i) const {
return _base_off + type_local_offset(i);
}
public:
- TypeStackSlotEntries(int base_off)
- : TypeEntries(base_off) {}
-
- static int compute_cell_count(BytecodeStream* stream);
-
- static void initialize(DataLayout* dl, int base, int cell_count) {
- if (TypeProfileArgsLimit > 1) {
- int off = base + cell_count_local_offset();
- dl->set_cell_at(off, cell_count - base - header_cell_count());
- }
- }
-
- void post_initialize(BytecodeStream* stream);
+ TypeStackSlotEntries(int base_off, int nb_entries)
+ : TypeEntries(base_off), _number_of_entries(nb_entries) {}
- int number_of_arguments() const {
- assert_arguments_profiling_enabled();
- if (TypeProfileArgsLimit > 1) {
- int cell_count = cell_count_no_header();
- int nb = cell_count / TypeStackSlotEntries::per_arg_count();
- assert(nb > 0 && nb <= TypeProfileArgsLimit , "only when we profile args");
- return nb;
- } else {
- assert(TypeProfileArgsLimit == 1, "at least one arg");
- return 1;
- }
- }
+ static int compute_cell_count(Symbol* signature, int max);
- int cell_count() const {
- assert_arguments_profiling_enabled();
- if (TypeProfileArgsLimit > 1) {
- return _base_off + header_cell_count() + _pd->int_at_unchecked(cell_count_global_offset());
- } else {
- return _base_off + TypeStackSlotEntries::per_arg_count();
- }
- }
+ void post_initialize(Symbol* signature, bool has_receiver);
// offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
static int stack_slot_local_offset(int i) {
- assert_arguments_profiling_enabled();
- return header_cell_count() + i * per_arg_cell_count + stack_slot_entry;
+ return i * per_arg_cell_count + stack_slot_entry;
}
// offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
static int type_local_offset(int i) {
- return header_cell_count() + i * per_arg_cell_count + type_entry;
+ return i * per_arg_cell_count + type_entry;
}
// stack slot for entry i
uint stack_slot(int i) const {
- assert(i >= 0 && i < number_of_arguments(), "oob");
- return _pd->uint_at(stack_slot_global_offset(i));
+ assert(i >= 0 && i < _number_of_entries, "oob");
+ return _pd->uint_at(stack_slot_offset(i));
}
// set stack slot for entry i
void set_stack_slot(int i, uint num) {
- assert(i >= 0 && i < number_of_arguments(), "oob");
- _pd->set_uint_at(stack_slot_global_offset(i), num);
+ assert(i >= 0 && i < _number_of_entries, "oob");
+ _pd->set_uint_at(stack_slot_offset(i), num);
}
// type for entry i
intptr_t type(int i) const {
- assert(i >= 0 && i < number_of_arguments(), "oob");
- return _pd->intptr_at(type_global_offset(i));
+ assert(i >= 0 && i < _number_of_entries, "oob");
+ return _pd->intptr_at(type_offset(i));
}
// set type for entry i
void set_type(int i, intptr_t k) {
- assert(i >= 0 && i < number_of_arguments(), "oob");
- _pd->set_intptr_at(type_global_offset(i), k);
+ assert(i >= 0 && i < _number_of_entries, "oob");
+ _pd->set_intptr_at(type_offset(i), k);
}
static ByteSize per_arg_size() {
@@ -871,22 +813,50 @@
return per_arg_cell_count ;
}
- // Code generation support
- static ByteSize cell_count_offset() {
- return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
- }
+ // GC support
+ void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
+
+#ifndef PRODUCT
+ void print_data_on(outputStream* st) const;
+#endif
+};
+
+// Type entry used for return from a call. A single cell to record the
+// type.
+class ReturnTypeEntry : public TypeEntries {
+
+private:
+ enum {
+ cell_count = 1
+ };
+
+public:
+ ReturnTypeEntry(int base_off)
+ : TypeEntries(base_off) {}
- static ByteSize args_data_offset() {
- return in_ByteSize(header_cell_count() * DataLayout::cell_size);
- }
+ void post_initialize() {
+ set_type(type_none());
+ }
+
+ intptr_t type() const {
+ return _pd->intptr_at(_base_off);
+ }
+
+ void set_type(intptr_t k) {
+ _pd->set_intptr_at(_base_off, k);
+ }
- static ByteSize stack_slot_offset(int i) {
- return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
- }
+ static int static_cell_count() {
+ return cell_count;
+ }
- static ByteSize type_offset(int i) {
- return in_ByteSize(type_local_offset(i) * DataLayout::cell_size);
- }
+ static ByteSize size() {
+ return in_ByteSize(cell_count * DataLayout::cell_size);
+ }
+
+ ByteSize type_offset() {
+ return DataLayout::cell_offset(_base_off);
+ }
// GC support
void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
@@ -896,23 +866,118 @@
#endif
};
+// Entries to collect type information at a call: contains arguments
+// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
+// number of cells. Because the number of cells for the return type is
+// smaller than the number of cells for the type of an arguments, the
+// number of cells is used to tell how many arguments are profiled and
+// whether a return value is profiled. See has_arguments() and
+// has_return().
+class TypeEntriesAtCall {
+private:
+ static int stack_slot_local_offset(int i) {
+ return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
+ }
+
+ static int argument_type_local_offset(int i) {
+ return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
+ }
+
+public:
+
+ static int header_cell_count() {
+ return 1;
+ }
+
+ static int cell_count_local_offset() {
+ return 0;
+ }
+
+ static int compute_cell_count(BytecodeStream* stream);
+
+ static void initialize(DataLayout* dl, int base, int cell_count) {
+ int off = base + cell_count_local_offset();
+ dl->set_cell_at(off, cell_count - base - header_cell_count());
+ }
+
+ static bool arguments_profiling_enabled();
+ static bool return_profiling_enabled();
+
+ // Code generation support
+ static ByteSize cell_count_offset() {
+ return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
+ }
+
+ static ByteSize args_data_offset() {
+ return in_ByteSize(header_cell_count() * DataLayout::cell_size);
+ }
+
+ static ByteSize stack_slot_offset(int i) {
+ return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
+ }
+
+ static ByteSize argument_type_offset(int i) {
+ return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
+ }
+};
+
// CallTypeData
//
// A CallTypeData is used to access profiling information about a non
-// virtual call for which we collect type information about arguments.
+// virtual call for which we collect type information about arguments
+// and return value.
class CallTypeData : public CounterData {
private:
+ // entries for arguments if any
TypeStackSlotEntries _args;
+ // entry for return type if any
+ ReturnTypeEntry _ret;
+
+ int cell_count_global_offset() const {
+ return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
+ }
+
+ // number of cells not counting the header
+ int cell_count_no_header() const {
+ return uint_at(cell_count_global_offset());
+ }
+
+ void check_number_of_arguments(int total) {
+ assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
+ }
+
+protected:
+ // An entry for a return value takes less space than an entry for an
+ // argument, so if the number of cells exceeds the number of cells
+ // needed for an argument, this object contains type information for
+ // at least one argument.
+ bool has_arguments() const {
+ bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
+ assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
+ return res;
+ }
public:
CallTypeData(DataLayout* layout) :
- CounterData(layout), _args(CounterData::static_cell_count()) {
+ CounterData(layout),
+ _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
+ _ret(cell_count() - ReturnTypeEntry::static_cell_count())
+ {
assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
// Some compilers (VC++) don't want this passed in member initialization list
_args.set_profile_data(this);
+ _ret.set_profile_data(this);
}
- const TypeStackSlotEntries* args() const { return &_args; }
+ const TypeStackSlotEntries* args() const {
+ assert(has_arguments(), "no profiling of arguments");
+ return &_args;
+ }
+
+ const ReturnTypeEntry* ret() const {
+ assert(has_return(), "no profiling of return value");
+ return &_ret;
+ }
virtual bool is_CallTypeData() const { return true; }
@@ -921,38 +986,60 @@
}
static int compute_cell_count(BytecodeStream* stream) {
- return CounterData::static_cell_count() + TypeStackSlotEntries::compute_cell_count(stream);
+ return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
}
static void initialize(DataLayout* dl, int cell_count) {
- TypeStackSlotEntries::initialize(dl, CounterData::static_cell_count(), cell_count);
- }
-
- virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {
- _args.post_initialize(stream);
+ TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
}
+ virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
+
virtual int cell_count() const {
- return _args.cell_count();
+ return CounterData::static_cell_count() +
+ TypeEntriesAtCall::header_cell_count() +
+ int_at_unchecked(cell_count_global_offset());
}
- uint number_of_arguments() const {
- return args()->number_of_arguments();
+ int number_of_arguments() const {
+ return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
}
void set_argument_type(int i, Klass* k) {
+ assert(has_arguments(), "no arguments!");
intptr_t current = _args.type(i);
_args.set_type(i, TypeEntries::with_status(k, current));
}
+ void set_return_type(Klass* k) {
+ assert(has_return(), "no return!");
+ intptr_t current = _ret.type();
+ _ret.set_type(TypeEntries::with_status(k, current));
+ }
+
+ // An entry for a return value takes less space than an entry for an
+ // argument, so if the remainder of the number of cells divided by
+ // the number of cells for an argument is not zero, a return value
+ // is profiled in this object.
+ bool has_return() const {
+ bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
+ assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
+ return res;
+ }
+
// Code generation support
static ByteSize args_data_offset() {
- return cell_offset(CounterData::static_cell_count()) + TypeStackSlotEntries::args_data_offset();
+ return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
- _args.clean_weak_klass_links(is_alive_closure);
+ if (has_arguments()) {
+ _args.clean_weak_klass_links(is_alive_closure);
+ }
+ if (has_return()) {
+ _ret.clean_weak_klass_links(is_alive_closure);
+ }
}
#ifndef PRODUCT
@@ -1105,20 +1192,59 @@
//
// A VirtualCallTypeData is used to access profiling information about
// a virtual call for which we collect type information about
-// arguments.
+// arguments and return value.
class VirtualCallTypeData : public VirtualCallData {
private:
+ // entries for arguments if any
TypeStackSlotEntries _args;
+ // entry for return type if any
+ ReturnTypeEntry _ret;
+
+ int cell_count_global_offset() const {
+ return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
+ }
+
+ // number of cells not counting the header
+ int cell_count_no_header() const {
+ return uint_at(cell_count_global_offset());
+ }
+
+ void check_number_of_arguments(int total) {
+ assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
+ }
+
+protected:
+ // An entry for a return value takes less space than an entry for an
+ // argument, so if the number of cells exceeds the number of cells
+ // needed for an argument, this object contains type information for
+ // at least one argument.
+ bool has_arguments() const {
+ bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
+ assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
+ return res;
+ }
public:
VirtualCallTypeData(DataLayout* layout) :
- VirtualCallData(layout), _args(VirtualCallData::static_cell_count()) {
+ VirtualCallData(layout),
+ _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
+ _ret(cell_count() - ReturnTypeEntry::static_cell_count())
+ {
assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
// Some compilers (VC++) don't want this passed in member initialization list
_args.set_profile_data(this);
+ _ret.set_profile_data(this);
}
- const TypeStackSlotEntries* args() const { return &_args; }
+ const TypeStackSlotEntries* args() const {
+ assert(has_arguments(), "no profiling of arguments");
+ return &_args;
+ }
+
+ const ReturnTypeEntry* ret() const {
+ assert(has_return(), "no profiling of return value");
+ return &_ret;
+ }
virtual bool is_VirtualCallTypeData() const { return true; }
@@ -1127,39 +1253,61 @@
}
static int compute_cell_count(BytecodeStream* stream) {
- return VirtualCallData::static_cell_count() + TypeStackSlotEntries::compute_cell_count(stream);
+ return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
}
static void initialize(DataLayout* dl, int cell_count) {
- TypeStackSlotEntries::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
- }
-
- virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {
- _args.post_initialize(stream);
+ TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
}
+ virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
+
virtual int cell_count() const {
- return _args.cell_count();
+ return VirtualCallData::static_cell_count() +
+ TypeEntriesAtCall::header_cell_count() +
+ int_at_unchecked(cell_count_global_offset());
}
- uint number_of_arguments() const {
- return args()->number_of_arguments();
+ int number_of_arguments() const {
+ return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
}
void set_argument_type(int i, Klass* k) {
+ assert(has_arguments(), "no arguments!");
intptr_t current = _args.type(i);
_args.set_type(i, TypeEntries::with_status(k, current));
}
+ void set_return_type(Klass* k) {
+ assert(has_return(), "no return!");
+ intptr_t current = _ret.type();
+ _ret.set_type(TypeEntries::with_status(k, current));
+ }
+
+ // An entry for a return value takes less space than an entry for an
+ // argument, so if the remainder of the number of cells divided by
+ // the number of cells for an argument is not zero, a return value
+ // is profiled in this object.
+ bool has_return() const {
+ bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
+ assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
+ return res;
+ }
+
// Code generation support
static ByteSize args_data_offset() {
- return cell_offset(VirtualCallData::static_cell_count()) + TypeStackSlotEntries::args_data_offset();
+ return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
- _args.clean_weak_klass_links(is_alive_closure);
+ if (has_arguments()) {
+ _args.clean_weak_klass_links(is_alive_closure);
+ }
+ if (has_return()) {
+ _ret.clean_weak_klass_links(is_alive_closure);
+ }
}
#ifndef PRODUCT
@@ -1691,6 +1839,9 @@
static bool profile_arguments_jsr292_only();
static bool profile_all_arguments();
static bool profile_arguments_for_invoke(methodHandle m, int bci);
+ static int profile_return_flag();
+ static bool profile_all_return();
+ static bool profile_return_for_invoke(methodHandle m, int bci);
public:
static int header_size() {
@@ -1933,6 +2084,8 @@
void verify_data_on(outputStream* st);
static bool profile_arguments();
+ static bool profile_return();
+ static bool profile_return_jsr292_only();
};
#endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
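Note: the has_arguments()/has_return() tests above work because an argument entry takes 2 cells while a return entry takes 1; the cell count stored in the TypeEntriesAtCall header therefore gives both the number of profiled arguments (count / 2) and whether a return type is recorded (count % 2 != 0). A small standalone illustration under those assumptions:

// Illustration of how CallTypeData/VirtualCallTypeData recover the layout
// from the single cell count stored in the TypeEntriesAtCall header.
// per_arg_count == 2 and the ReturnTypeEntry cell count == 1, as in the patch.
#include <cassert>

struct Layout { int number_of_arguments; bool has_return; };

static Layout decode(int cell_count_no_header) {
  const int per_arg = 2;
  Layout l;
  l.number_of_arguments = cell_count_no_header / per_arg;
  l.has_return = (cell_count_no_header % per_arg) != 0;
  return l;
}

int main() {
  assert(decode(5).number_of_arguments == 2 && decode(5).has_return);   // 2 args + ret
  assert(decode(4).number_of_arguments == 2 && !decode(4).has_return);  // 2 args, no ret
  assert(decode(1).number_of_arguments == 0 && decode(1).has_return);   // ret only
  return 0;
}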
--- a/hotspot/src/share/vm/runtime/globals.hpp Fri Oct 11 19:51:31 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Sat Oct 12 12:12:59 2013 +0200
@@ -2649,8 +2649,9 @@
"Enable aggressive optimizations - see arguments.cpp") \
\
product_pd(uintx, TypeProfileLevel, \
- "Type profiling of arguments at call:" \
- "0->off ; 1->js292 only; 2->all methods") \
+ "=XY, with Y, Type profiling of arguments at call" \
+ " X, Type profiling of return value at call" \
+ "X and Y in 0->off ; 1->js292 only; 2->all methods") \
\
product(intx, TypeProfileArgsLimit, 2, \
"max number of call arguments to consider for type profiling") \