--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Mon Jul 23 13:04:59 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Tue Jul 24 10:51:00 2012 -0700
@@ -458,7 +458,7 @@
const Register cache = rcx;
const Register index = rdx;
- resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
+ resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
if (VerifyOops) {
__ verify_oop(rax);
}
@@ -2125,10 +2125,11 @@
assert_different_registers(result, Rcache, index, temp);
Label resolved;
- if (byte_no == f1_oop) {
- // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
- // This kind of CP cache entry does not need to match the flags byte, because
+ if (byte_no == f12_oop) {
+ // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
+ // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
// there is a 1-1 relation between bytecode type and CP entry type.
+ // The caller will also load a methodOop from f2.
assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
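The comment above describes the shape of the resolution test. A minimal C++ sketch of that logic, not part of the patch, with illustrative stand-in names (f1 and cached_bytecode are not real accessors):

// Sketch only: how resolve_cache_and_index decides an entry is resolved.
static bool entry_is_resolved(bool is_f12_oop_entry, const void* f1,
                              int cached_bytecode, int current_bytecode) {
  if (is_f12_oop_entry) {
    // invokedynamic/invokehandle entries are resolved once f1 holds a
    // non-null oop (CallSite, MethodType, ...); no bytecode_1/bytecode_2
    // comparison is needed, since bytecode type and entry type match 1-1.
    return f1 != nullptr;
  }
  // ordinary entries are resolved once the recorded bytecode matches
  return cached_bytecode == current_bytecode;
}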
@@ -2157,6 +2158,9 @@
case Bytecodes::_invokeinterface:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
break;
+ case Bytecodes::_invokehandle:
+ entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
+ break;
case Bytecodes::_invokedynamic:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
break;
@@ -2167,7 +2171,7 @@
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
break;
default:
- ShouldNotReachHere();
+ fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
break;
}
__ movl(temp, (int) bytecode());
@@ -2180,7 +2184,7 @@
__ bind(resolved);
}
-// The Rcache and index registers must be set before call
+// The cache and index registers must be set before call
void TemplateTable::load_field_cp_cache_entry(Register obj,
Register cache,
Register index,
@@ -2191,17 +2195,17 @@
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
// Field offset
- __ movptr(off, Address(cache, index, Address::times_8,
+ __ movptr(off, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f2_offset())));
// Flags
- __ movl(flags, Address(cache, index, Address::times_8,
+ __ movl(flags, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));
// klass overwrite register
if (is_static) {
- __ movptr(obj, Address(cache, index, Address::times_8,
+ __ movptr(obj, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f1_offset())));
}
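The times_8 to times_ptr change above is about consistency rather than behavior on this platform; a small sketch of the effective address an x86 scaled-index operand computes, assuming times_ptr scales by the pointer size (equivalent to times_8 on LP64 and times_4 on 32-bit):

// Sketch: effective address of Address(cache, index, times_ptr, disp).
// Scaling by the pointer size keeps the field-access pattern consistent
// with the 32-bit templateTable file.
static long cp_cache_field_address(long cache, long index, long disp) {
  const long scale = (long) sizeof(void*);   // times_ptr
  return cache + index * scale + disp;
}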
@@ -2222,9 +2226,10 @@
assert_different_registers(itable_index, flags);
assert_different_registers(itable_index, cache, index);
// determine constant pool cache field offsets
+ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int method_offset = in_bytes(
constantPoolCacheOopDesc::base_offset() +
- (is_invokevirtual
+ ((byte_no == f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
@@ -2233,15 +2238,21 @@
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset());
- if (byte_no == f1_oop) {
- // Resolved f1_oop goes directly into 'method' register.
- assert(is_invokedynamic, "");
- resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
+ if (byte_no == f12_oop) {
+ // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
+ // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
+ // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
+ size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
+ resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
+ __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
+ itable_index = noreg; // hack to disable load below
} else {
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
}
if (itable_index != noreg) {
+ // pick up itable index from f2 also:
+ assert(byte_no == f1_byte, "already picked up f1");
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
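With the f12_oop case added, the routing of cache-entry words to registers in load_invoke_cp_cache_entry can be summarized as follows; a compact sketch, not patch code, where f1 and f2 stand for the two entry words:

// Sketch of the register routing per byte_no (illustrative only):
struct InvokeCacheFields { const void* method; const void* itable_index_or_appendix; };
static InvokeCacheFields route_invoke_fields(bool is_f2_byte, bool is_f12_oop,
                                             const void* f1, const void* f2) {
  InvokeCacheFields r = { nullptr, nullptr };
  if (is_f2_byte) {            // invokevirtual: vtable index or vfinal methodOop
    r.method = f2;
  } else if (is_f12_oop) {     // invokedynamic/invokehandle: appendix + invoker
    r.itable_index_or_appendix = f1;   // CallSite or MethodType
    r.method = f2;                     // methodOop invoker
  } else {                     // f1_byte: invokespecial/static/interface
    r.method = f1;
    r.itable_index_or_appendix = f2;   // itable index, when the caller wants it
  }
  return r;
}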
@@ -2317,10 +2328,11 @@
Label Done, notByte, notInt, notShort, notChar,
notLong, notFloat, notObj, notDouble;
- __ shrl(flags, ConstantPoolCacheEntry::tosBits);
+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
+ // Make sure we don't need to mask edx after the above shift
assert(btos == 0, "change code, btos != 0");
- __ andl(flags, 0x0F);
+ __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
__ jcc(Assembler::notZero, notByte);
// btos
__ load_signed_byte(rax, field);
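The renamed constants make the flags decode explicit; a minimal sketch of the extraction, with the shift and mask values left as parameters since their concrete values live in ConstantPoolCacheEntry:

// Sketch: pulling the tos state (result type) out of a cache-entry flags word.
static int tos_state_of(unsigned int flags, int tos_state_shift,
                        unsigned int tos_state_mask) {
  // btos is 0, so a zero result after shift+mask means "byte" and the
  // jcc(notZero, notByte) above falls through to the btos case.
  return (int) ((flags >> tos_state_shift) & tos_state_mask);
}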
@@ -2466,10 +2478,9 @@
Address::times_8,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));
- __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits);
- // Make sure we don't need to mask rcx for tosBits after the
- // above shift
- ConstantPoolCacheEntry::verify_tosBits();
+ __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
+ // Make sure we don't need to mask rcx after the above shift
+ ConstantPoolCacheEntry::verify_tos_state_shift();
__ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
__ cmpl(c_rarg3, ltos);
__ cmovptr(Assembler::equal,
@@ -2516,7 +2527,7 @@
Label notVolatile, Done;
__ movl(rdx, flags);
- __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
+ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
__ andl(rdx, 0x1);
// field address
@@ -2525,10 +2536,10 @@
Label notByte, notInt, notShort, notChar,
notLong, notFloat, notObj, notDouble;
- __ shrl(flags, ConstantPoolCacheEntry::tosBits);
+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
assert(btos == 0, "change code, btos != 0");
- __ andl(flags, 0x0f);
+ __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
__ jcc(Assembler::notZero, notByte);
// btos
@@ -2751,7 +2762,7 @@
// Assembler::StoreStore));
Label notVolatile;
- __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
+ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
__ andl(rdx, 0x1);
// Get object from stack
@@ -2832,7 +2843,7 @@
// __ movl(rdx, Address(rcx, rbx, Address::times_8,
// in_bytes(constantPoolCacheOopDesc::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
- // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
+ // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ andl(rdx, 0x1);
// }
__ movptr(rbx, Address(rcx, rbx, Address::times_8,
@@ -2920,7 +2931,7 @@
// __ movl(rdx, Address(rcx, rdx, Address::times_8,
// in_bytes(constantPoolCacheOopDesc::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
- // __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
+ // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ testl(rdx, 0x1);
// __ jcc(Assembler::zero, notVolatile);
// __ membar(Assembler::LoadLoad);
@@ -2940,19 +2951,29 @@
ShouldNotReachHere();
}
-void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
+void TemplateTable::prepare_invoke(int byte_no,
+ Register method, // linked method (or i-klass)
+ Register index, // itable index, MethodType, etc.
+ Register recv, // if caller wants to see it
+ Register flags // if caller wants to test it
+ ) {
// determine flags
- Bytecodes::Code code = bytecode();
+ const Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
+ const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
- const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
- const bool receiver_null_check = is_invokespecial;
- const bool save_flags = is_invokeinterface || is_invokevirtual;
+ const bool load_receiver = (recv != noreg);
+ const bool save_flags = (flags != noreg);
+ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
+ assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
+ assert(flags == noreg || flags == rdx, "");
+ assert(recv == noreg || recv == rcx, "");
+
// setup registers & access constant pool cache
- const Register recv = rcx;
- const Register flags = rdx;
+ if (recv == noreg) recv = rcx;
+ if (flags == noreg) flags = rdx;
assert_different_registers(method, index, recv, flags);
// save 'interpreter return address'
@@ -2960,36 +2981,44 @@
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
- // load receiver if needed (note: no return address pushed yet)
+ // maybe push appendix to arguments (just before return address)
+ if (is_invokedynamic || is_invokehandle) {
+ Label L_no_push;
+ __ verify_oop(index);
+ __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
+ __ jccb(Assembler::zero, L_no_push);
+ // Push the appendix as a trailing parameter.
+ // This must be done before we get the receiver,
+ // since the parameter_size includes it.
+ __ push(index); // push appendix (MethodType, CallSite, etc.)
+ __ bind(L_no_push);
+ }
+
+ // load receiver if needed (after appendix is pushed so parameter size is correct)
+ // Note: no return address pushed yet
if (load_receiver) {
- assert(!is_invokedynamic, "");
__ movl(recv, flags);
- __ andl(recv, 0xFF);
- Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
+ __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
+ const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
+ const int receiver_is_at_end = -1; // back off one slot to get receiver
+ Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
__ movptr(recv, recv_addr);
__ verify_oop(recv);
}
- // do null check if needed
- if (receiver_null_check) {
- __ null_check(recv);
- }
-
if (save_flags) {
__ movl(r13, flags);
}
// compute return type
- __ shrl(flags, ConstantPoolCacheEntry::tosBits);
- // Make sure we don't need to mask flags for tosBits after the above shift
- ConstantPoolCacheEntry::verify_tosBits();
+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
+ // Make sure we don't need to mask flags after the above shift
+ ConstantPoolCacheEntry::verify_tos_state_shift();
// load return address
{
- address table_addr;
- if (is_invokeinterface || is_invokedynamic)
- table_addr = (address)Interpreter::return_5_addrs_by_index_table();
- else
- table_addr = (address)Interpreter::return_3_addrs_by_index_table();
+ const address table_addr = (is_invokeinterface || is_invokedynamic) ?
+ (address)Interpreter::return_5_addrs_by_index_table() :
+ (address)Interpreter::return_3_addrs_by_index_table();
ExternalAddress table(table_addr);
__ lea(rscratch1, table);
__ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
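The receiver addressing in the hunk above relies on parameter_size counting every incoming stack slot, appendix included, which is why the appendix must be pushed first; a rough sketch of the arithmetic, assuming 8-byte expression-stack slots and no return address pushed yet:

// Sketch: byte offset of the receiver from rsp inside prepare_invoke.
// The receiver is the deepest of the parameter_size incoming slots.
static long receiver_offset_from_rsp(int parameter_size) {
  const int slot_size = 8;                       // x86_64 expression stack slot
  const int receiver_slot = parameter_size - 1;  // back off one slot
  return (long) receiver_slot * slot_size;       // no return PC on the stack yet
}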
@@ -2998,7 +3027,7 @@
// push return address
__ push(flags);
- // Restore flag field from the constant pool cache, and restore esi
+  // Restore flags value from the constant pool cache, and restore r13
// for later null checks. r13 is the bytecode pointer
if (save_flags) {
__ movl(flags, r13);
@@ -3012,11 +3041,13 @@
Register flags) {
// Uses temporary registers rax, rdx
assert_different_registers(index, recv, rax, rdx);
+ assert(index == rbx, "");
+ assert(recv == rcx, "");
// Test for an invoke of a final method
Label notFinal;
__ movl(rax, flags);
- __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
+ __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
__ jcc(Assembler::zero, notFinal);
const Register method = index; // method must be rbx
@@ -3024,6 +3055,7 @@
"methodOop must be rbx for interpreter calling convention");
// do the call - the index is actually the method to call
+ // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
__ verify_oop(method);
// It's final, need a null check here!
@@ -3039,20 +3071,13 @@
// get receiver klass
__ null_check(recv, oopDesc::klass_offset_in_bytes());
__ load_klass(rax, recv);
-
__ verify_oop(rax);
// profile this call
__ profile_virtual_call(rax, r14, rdx);
// get target methodOop & entry point
- const int base = instanceKlass::vtable_start_offset() * wordSize;
- assert(vtableEntry::size() * wordSize == 8,
- "adjust the scaling in the code below");
- __ movptr(method, Address(rax, index,
- Address::times_8,
- base + vtableEntry::method_offset_in_bytes()));
- __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
+ __ lookup_virtual_method(rax, index, method);
__ jump_from_interpreted(method, rdx);
}
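The removed open-coded sequence and its lookup_virtual_method replacement compute the same slot address; a sketch of that calculation, using the offsets named in the deleted lines:

// Sketch: address of the methodOop slot for a given vtable index.
static long vtable_method_slot(long recv_klass, long vtable_index,
                               long vtable_start_offset_bytes,   // instanceKlass::vtable_start_offset() * wordSize
                               long vtable_entry_size_bytes,     // vtableEntry::size() * wordSize (8 here)
                               long method_offset_in_entry) {    // vtableEntry::method_offset_in_bytes()
  return recv_klass + vtable_start_offset_bytes
         + vtable_index * vtable_entry_size_bytes + method_offset_in_entry;
}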
@@ -3060,7 +3085,10 @@
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
- prepare_invoke(rbx, noreg, byte_no);
+ prepare_invoke(byte_no,
+ rbx, // method or vtable index
+ noreg, // unused itable index
+ rcx, rdx); // recv, flags
// rbx: index
// rcx: receiver
@@ -3073,7 +3101,10 @@
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(rbx, noreg, byte_no);
+ prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop
+ rcx); // get receiver also for null check
+ __ verify_oop(rcx);
+ __ null_check(rcx);
// do the call
__ verify_oop(rbx);
__ profile_call(rax);
@@ -3084,7 +3115,7 @@
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(rbx, noreg, byte_no);
+ prepare_invoke(byte_no, rbx); // get f1 methodOop
// do the call
__ verify_oop(rbx);
__ profile_call(rax);
@@ -3100,10 +3131,11 @@
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(rax, rbx, byte_no);
-
- // rax: Interface
- // rbx: index
+ prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index
+ rcx, rdx); // recv, flags
+
+ // rax: interface klass (from f1)
+ // rbx: itable index (from f2)
// rcx: receiver
// rdx: flags
@@ -3113,14 +3145,15 @@
// another compliant java compiler.
Label notMethod;
__ movl(r14, rdx);
- __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface));
+ __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
__ jcc(Assembler::zero, notMethod);
invokevirtual_helper(rbx, rcx, rdx);
__ bind(notMethod);
// Get receiver klass into rdx - also a null check
- __ restore_locals(); // restore r14
+ __ restore_locals(); // restore r14
+ __ null_check(rcx, oopDesc::klass_offset_in_bytes());
__ load_klass(rdx, rcx);
__ verify_oop(rdx);
@@ -3135,7 +3168,7 @@
rbx, r13,
no_such_interface);
- // rbx,: methodOop to call
+ // rbx: methodOop to call
// rcx: receiver
// Check for abstract method error
// Note: This should be done more efficiently via a throw_abstract_method_error
@@ -3172,12 +3205,42 @@
InterpreterRuntime::throw_IncompatibleClassChangeError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
- return;
}
+
+void TemplateTable::invokehandle(int byte_no) {
+ transition(vtos, vtos);
+ assert(byte_no == f12_oop, "use this argument");
+ const Register rbx_method = rbx; // f2
+ const Register rax_mtype = rax; // f1
+ const Register rcx_recv = rcx;
+ const Register rdx_flags = rdx;
+
+ if (!EnableInvokeDynamic) {
+ // rewriter does not generate this bytecode
+ __ should_not_reach_here();
+ return;
+ }
+
+ prepare_invoke(byte_no,
+ rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType
+ rcx_recv);
+ __ verify_oop(rbx_method);
+ __ verify_oop(rcx_recv);
+ __ null_check(rcx_recv);
+
+ // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
+
+ // FIXME: profile the LambdaForm also
+ __ profile_final_call(rax);
+
+ __ jump_from_interpreted(rbx_method, rdx);
+}
+
+
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
- assert(byte_no == f1_oop, "use this argument");
+ assert(byte_no == f12_oop, "use this argument");
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3190,26 +3253,23 @@
return;
}
- prepare_invoke(rax, rbx, byte_no);
-
- // rax: CallSite object (f1)
- // rbx: unused (f2)
- // rcx: receiver address
- // rdx: flags (unused)
-
- Register rax_callsite = rax;
- Register rcx_method_handle = rcx;
+ const Register rbx_method = rbx;
+ const Register rax_callsite = rax;
+
+ prepare_invoke(byte_no, rbx_method, rax_callsite);
+
+ // rax: CallSite object (from f1)
+ // rbx: MH.linkToCallSite method (from f2)
+
+ // Note: rax_callsite is already pushed by prepare_invoke
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call
__ profile_call(r13);
__ verify_oop(rax_callsite);
- __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
- __ null_check(rcx_method_handle);
- __ verify_oop(rcx_method_handle);
- __ prepare_to_jump_from_interpreted();
- __ jump_to_method_handle_entry(rcx_method_handle, rdx);
+
+ __ jump_from_interpreted(rbx_method, rdx);
}