hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
changeset:  13391:30245956af37
parent:     12366:76be8878c0cd
child:      13728:882756847a04
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Mon Jul 23 13:04:59 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Jul 24 10:51:00 2012 -0700
@@ -446,13 +446,13 @@
   const Register cache = rcx;
   const Register index = rdx;
 
-  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
+  resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
   if (VerifyOops) {
     __ verify_oop(rax);
   }
 
   Label L_done, L_throw_exception;
-  const Register con_klass_temp = rcx;  // same as Rcache
+  const Register con_klass_temp = rcx;  // same as cache
   __ load_klass(con_klass_temp, rax);
   __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
   __ jcc(Assembler::notEqual, L_done);
@@ -2084,15 +2084,15 @@
                                             Register Rcache,
                                             Register index,
                                             size_t index_size) {
-  Register temp = rbx;
-
+  const Register temp = rbx;
   assert_different_registers(result, Rcache, index, temp);
 
   Label resolved;
-  if (byte_no == f1_oop) {
-    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
-    // This kind of CP cache entry does not need to match the flags byte, because
+  if (byte_no == f12_oop) {
+    // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
+    // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
     // there is a 1-1 relation between bytecode type and CP entry type.
+    // The caller will also load a methodOop from f2.
     assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
     __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
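
The f1_oop -> f12_oop rename in this hunk and the preceding one reflects the rewritten JSR 292 linkage: a call-site cache entry now carries two resolved values, an oop in f1 (the CallSite or MethodType) and a methodOop invoker in f2. A minimal host-side sketch of the "already resolved?" test this stub emits; the struct and helper below are illustrative stand-ins, not HotSpot types:

    #include <cstddef>

    struct FakeCpCacheEntry {   // stand-in for ConstantPoolCacheEntry
      void* f1;                 // CallSite/MethodType oop once resolved, else null
      void* f2;                 // methodOop of the invoker adapter
    };

    // Mirrors the emitted cmpptr(f1, NULL_WORD); jcc(notEqual, resolved):
    // an f12_oop entry is resolved exactly when f1 is non-null, so no
    // bytecode_1/bytecode_2 comparison is needed.
    static bool is_resolved_f12_oop(const FakeCpCacheEntry* e) {
      return e->f1 != NULL;
    }
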
@@ -2112,15 +2112,18 @@
     case Bytecodes::_getstatic      : // fall through
     case Bytecodes::_putstatic      : // fall through
     case Bytecodes::_getfield       : // fall through
-    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
+    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);        break;
     case Bytecodes::_invokevirtual  : // fall through
     case Bytecodes::_invokespecial  : // fall through
     case Bytecodes::_invokestatic   : // fall through
-    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
-    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
-    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
-    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
-    default                         : ShouldNotReachHere();                                 break;
+    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);         break;
+    case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);   break;
+    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);  break;
+    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);            break;
+    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);            break;
+    default:
+      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
+      break;
   }
   __ movl(temp, (int)bytecode());
   __ call_VM(noreg, entry, temp);
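
For the dispatch above, the only behavioral changes are the new _invokehandle case and a default that reports which bytecode was seen instead of a bare ShouldNotReachHere(). A small host-side analog of that pattern, with invented enum values and resolver names:

    #include <cstdio>
    #include <cstdlib>

    enum FakeBytecode { kPutfield, kInvokevirtual, kInvokehandle };

    static const char* resolver_for(FakeBytecode bc) {
      switch (bc) {
        case kPutfield:      return "resolve_get_put";
        case kInvokevirtual: return "resolve_invoke";
        case kInvokehandle:  return "resolve_invokehandle";  // new in this change
        default:
          fprintf(stderr, "unexpected bytecode: %d\n", (int)bc);  // cf. fatal(err_msg(...))
          abort();
      }
    }
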
@@ -2149,7 +2152,7 @@
   __ movl(flags, Address(cache, index, Address::times_ptr,
            in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
 
-  // klass     overwrite register
+  // klass overwrite register
   if (is_static) {
     __ movptr(obj, Address(cache, index, Address::times_ptr,
                            in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
@@ -2161,7 +2164,7 @@
                                                Register itable_index,
                                                Register flags,
                                                bool is_invokevirtual,
-                                               bool is_invokevfinal /*unused*/,
+                                               bool is_invokevfinal, /*unused*/
                                                bool is_invokedynamic) {
   // setup registers
   const Register cache = rcx;
@@ -2171,28 +2174,33 @@
   assert_different_registers(itable_index, flags);
   assert_different_registers(itable_index, cache, index);
   // determine constant pool cache field offsets
+  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
   const int method_offset = in_bytes(
     constantPoolCacheOopDesc::base_offset() +
-      (is_invokevirtual
+      ((byte_no == f2_byte)
        ? ConstantPoolCacheEntry::f2_offset()
-       : ConstantPoolCacheEntry::f1_offset()
-      )
-    );
+       : ConstantPoolCacheEntry::f1_offset()));
   const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                     ConstantPoolCacheEntry::flags_offset());
   // access constant pool cache fields
   const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                     ConstantPoolCacheEntry::f2_offset());
 
-  if (byte_no == f1_oop) {
-    // Resolved f1_oop goes directly into 'method' register.
-    assert(is_invokedynamic, "");
-    resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
+  if (byte_no == f12_oop) {
+    // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
+    // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
+    // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
+    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
+    resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
+    __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
+    itable_index = noreg;  // hack to disable load below
   } else {
     resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
     __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
   }
   if (itable_index != noreg) {
+    // pick up itable index from f2 also:
+    assert(byte_no == f1_byte, "already picked up f1");
     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
   }
   __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
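
In load_invoke_cp_cache_entry the method word now comes from f1 or f2 strictly according to byte_no, and the f12_oop path loads both words: the invoker methodOop from f2 and the resolved oop from f1 (returned through itable_index, then disabled for the f2 load below). A sketch of the selection with stand-in types:

    enum ByteNo { f1_byte, f2_byte, f12_oop };

    struct FakeEntry { void* f1; void* f2; };

    // f1_byte/f2_byte entries keep the linked method in the matching word;
    // f12_oop entries always keep the invoker methodOop in f2, while the
    // f1 oop (CallSite, MethodType, etc.) is handed back separately.
    static void* method_word(const FakeEntry* e, ByteNo b) {
      return (b == f1_byte) ? e->f1 : e->f2;
    }
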
@@ -2260,10 +2268,10 @@
 
   Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
 
-  __ shrl(flags, ConstantPoolCacheEntry::tosBits);
+  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
   assert(btos == 0, "change code, btos != 0");
   // btos
-  __ andptr(flags, 0x0f);
+  __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
   __ jcc(Assembler::notZero, notByte);
 
   __ load_signed_byte(rax, lo );
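
The literal masks and bit positions used above are replaced by named ConstantPoolCacheEntry constants throughout this file. A runnable sketch of the decoding; the shift positions below are placeholders, and only the masks are taken from the literals being replaced (0x0f here, 0xFF in prepare_invoke):

    #include <stdint.h>

    enum {
      tos_state_shift     = 28,    // placeholder bit position
      tos_state_mask      = 0x0f,  // was the literal 0x0f
      is_volatile_shift   = 21,    // placeholder bit position
      parameter_size_mask = 0xff   // was the literal 0xFF
    };

    static uint32_t tos_state(uint32_t flags) {
      return (flags >> tos_state_shift) & tos_state_mask;   // shrl; andl
    }

    static bool is_volatile_field(uint32_t flags) {
      return ((flags >> is_volatile_shift) & 1) != 0;       // shrl; andl 0x1
    }
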
@@ -2415,9 +2423,9 @@
       __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::flags_offset())));
       __ mov(rbx, rsp);
-      __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
-      // Make sure we don't need to mask rcx for tosBits after the above shift
-      ConstantPoolCacheEntry::verify_tosBits();
+      __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift);
+      // Make sure we don't need to mask rcx after the above shift
+      ConstantPoolCacheEntry::verify_tos_state_shift();
       __ cmpl(rcx, ltos);
       __ jccb(Assembler::equal, two_word);
       __ cmpl(rcx, dtos);
@@ -2467,7 +2475,7 @@
 
   Label notVolatile, Done;
   __ movl(rdx, flags);
-  __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
+  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
   __ andl(rdx, 0x1);
 
   // field addresses
@@ -2476,9 +2484,9 @@
 
   Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
 
-  __ shrl(flags, ConstantPoolCacheEntry::tosBits);
+  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
   assert(btos == 0, "change code, btos != 0");
-  __ andl(flags, 0x0f);
+  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
   __ jcc(Assembler::notZero, notByte);
 
   // btos
@@ -2719,7 +2727,7 @@
   // volatile_barrier( );
 
   Label notVolatile, Done;
-  __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
+  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
   __ andl(rdx, 0x1);
   // Check for volatile store
   __ testl(rdx, rdx);
@@ -2885,19 +2893,29 @@
 }
 
 
-void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
+void TemplateTable::prepare_invoke(int byte_no,
+                                   Register method,  // linked method (or i-klass)
+                                   Register index,   // itable index, MethodType, etc.
+                                   Register recv,    // if caller wants to see it
+                                   Register flags    // if caller wants to test it
+                                   ) {
   // determine flags
-  Bytecodes::Code code = bytecode();
+  const Bytecodes::Code code = bytecode();
   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
+  const bool is_invokehandle     = code == Bytecodes::_invokehandle;
   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
-  const bool load_receiver      = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
-  const bool receiver_null_check = is_invokespecial;
-  const bool save_flags = is_invokeinterface || is_invokevirtual;
+  const bool load_receiver       = (recv  != noreg);
+  const bool save_flags          = (flags != noreg);
+  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
+  assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
+  assert(flags == noreg || flags == rdx, "");
+  assert(recv  == noreg || recv  == rcx, "");
+
   // setup registers & access constant pool cache
-  const Register recv   = rcx;
-  const Register flags  = rdx;
+  if (recv  == noreg)  recv  = rcx;
+  if (flags == noreg)  flags = rdx;
   assert_different_registers(method, index, recv, flags);
 
   // save 'interpreter return address'
@@ -2905,37 +2923,43 @@
 
   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
 
+  // maybe push appendix to arguments (just before return address)
+  if (is_invokedynamic || is_invokehandle) {
+    Label L_no_push;
+    __ verify_oop(index);
+    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
+    __ jccb(Assembler::zero, L_no_push);
+    // Push the appendix as a trailing parameter.
+    // This must be done before we get the receiver,
+    // since the parameter_size includes it.
+    __ push(index);  // push appendix (MethodType, CallSite, etc.)
+    __ bind(L_no_push);
+  }
+
   // load receiver if needed (note: no return address pushed yet)
   if (load_receiver) {
-    assert(!is_invokedynamic, "");
     __ movl(recv, flags);
-    __ andl(recv, 0xFF);
-    // recv count is 0 based?
-    Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
+    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
+    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
+    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
+    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
     __ movptr(recv, recv_addr);
     __ verify_oop(recv);
   }
 
-  // do null check if needed
-  if (receiver_null_check) {
-    __ null_check(recv);
-  }
-
   if (save_flags) {
     __ mov(rsi, flags);
   }
 
   // compute return type
-  __ shrl(flags, ConstantPoolCacheEntry::tosBits);
-  // Make sure we don't need to mask flags for tosBits after the above shift
-  ConstantPoolCacheEntry::verify_tosBits();
+  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
+  // Make sure we don't need to mask flags after the above shift
+  ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    address table_addr;
-    if (is_invokeinterface || is_invokedynamic)
-      table_addr = (address)Interpreter::return_5_addrs_by_index_table();
-    else
-      table_addr = (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
+        (address)Interpreter::return_5_addrs_by_index_table() :
+        (address)Interpreter::return_3_addrs_by_index_table();
     ExternalAddress table(table_addr);
     __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
   }
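
Two details of the rewritten prepare_invoke are worth spelling out: the appendix (the f1 oop) must be pushed before the receiver is located, because parameter_size already counts it, and the receiver then sits parameter_size - 1 expression-stack slots from the top since no return address has been pushed yet. A sketch of that arithmetic (the slot diagram is illustrative):

    // Expression stack, one word per slot, growing toward lower addresses,
    // before the return address is pushed:
    //
    //   rsp -> [ appendix ]          (only when has_appendix is set)
    //          [ arg N-1  ]
    //          ...
    //          [ arg 0 = receiver ]
    //
    static int receiver_slot_from_top(int parameter_size) {
      return parameter_size - 1;  // cf. the old -expr_offset_in_bytes(1) displacement
    }
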
@@ -2943,7 +2967,7 @@
   // push return address
   __ push(flags);
 
-  // Restore flag value from the constant pool cache, and restore rsi
+  // Restore flags value from the constant pool cache, and restore rsi
   // for later null checks.  rsi is the bytecode pointer
   if (save_flags) {
     __ mov(flags, rsi);
@@ -2952,22 +2976,26 @@
 }
 
 
-void TemplateTable::invokevirtual_helper(Register index, Register recv,
-                        Register flags) {
-
+void TemplateTable::invokevirtual_helper(Register index,
+                                         Register recv,
+                                         Register flags) {
   // Uses temporary registers rax, rdx
   assert_different_registers(index, recv, rax, rdx);
+  assert(index == rbx, "");
+  assert(recv  == rcx, "");
 
   // Test for an invoke of a final method
   Label notFinal;
   __ movl(rax, flags);
-  __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
+  __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
   __ jcc(Assembler::zero, notFinal);
 
-  Register method = index;  // method must be rbx,
-  assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
+  const Register method = index;  // method must be rbx
+  assert(method == rbx,
+         "methodOop must be rbx for interpreter calling convention");
 
   // do the call - the index is actually the method to call
+  // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
   __ verify_oop(method);
 
   // It's final, need a null check here!
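
The asserts and the new comment above pin down the invokevirtual convention: f2 holds a vtable index unless is_vfinal is set, in which case it holds the methodOop itself. Illustrative only; HotSpot stores both cases in a single intptr_t field, not a union:

    #include <stdint.h>

    struct FakeMethod;

    union FakeF2 {
      intptr_t    vtable_index;  // when !is_vfinal: index into receiver's vtable
      FakeMethod* method;        // when is_vfinal: the target methodOop itself
    };
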
@@ -2982,7 +3010,6 @@
 
   // get receiver klass
   __ null_check(recv, oopDesc::klass_offset_in_bytes());
-  // Keep recv in rcx for callee expects it there
   __ load_klass(rax, recv);
   __ verify_oop(rax);
 
@@ -2990,9 +3017,7 @@
   __ profile_virtual_call(rax, rdi, rdx);
 
   // get target methodOop & entry point
-  const int base = instanceKlass::vtable_start_offset() * wordSize;
-  assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
-  __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
+  __ lookup_virtual_method(rax, index, method);
   __ jump_from_interpreted(method, rdx);
 }
 
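lookup_virtual_method replaces the open-coded vtable fetch that the removed lines spelled out (vtable base plus scaled index plus method-field offset), so the same sequence can be shared with other call paths. A conceptual C++ equivalent with stand-in types:

    struct FakeMethod;

    struct FakeKlass {             // stand-in; the real vtable begins at
      FakeMethod* vtable[1];       // instanceKlass::vtable_start_offset()
    };

    static FakeMethod* lookup_virtual_method(const FakeKlass* k, long index) {
      // equivalent to: base + index * wordSize + vtableEntry::method_offset_in_bytes()
      return k->vtable[index];
    }
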
@@ -3000,9 +3025,12 @@
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f2_byte, "use this argument");
-  prepare_invoke(rbx, noreg, byte_no);
-
-  // rbx,: index
+  prepare_invoke(byte_no,
+                 rbx,    // method or vtable index
+                 noreg,  // unused itable index
+                 rcx, rdx); // recv, flags
+
+  // rbx: index
   // rcx: receiver
   // rdx: flags
 
@@ -3013,7 +3041,10 @@
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(rbx, noreg, byte_no);
+  prepare_invoke(byte_no, rbx, noreg,  // get f1 methodOop
+                 rcx);  // get receiver also for null check
+  __ verify_oop(rcx);
+  __ null_check(rcx);
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -3024,7 +3055,7 @@
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(rbx, noreg, byte_no);
+  prepare_invoke(byte_no, rbx);  // get f1 methodOop
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -3042,10 +3073,11 @@
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(rax, rbx, byte_no);
-
-  // rax,: Interface
-  // rbx,: index
+  prepare_invoke(byte_no, rax, rbx,  // get f1 klassOop, f2 itable index
+                 rcx, rdx); // recv, flags
+
+  // rax: interface klass (from f1)
+  // rbx: itable index (from f2)
   // rcx: receiver
   // rdx: flags
 
@@ -3055,7 +3087,7 @@
   // another compliant java compiler.
   Label notMethod;
   __ movl(rdi, rdx);
-  __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
+  __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
   __ jcc(Assembler::zero, notMethod);
 
   invokevirtual_helper(rbx, rcx, rdx);
@@ -3063,6 +3095,7 @@
 
   // Get receiver klass into rdx - also a null check
   __ restore_locals();  // restore rdi
+  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
   __ load_klass(rdx, rcx);
   __ verify_oop(rdx);
 
@@ -3077,7 +3110,7 @@
                              rbx, rsi,
                              no_such_interface);
 
-  // rbx,: methodOop to call
+  // rbx: methodOop to call
   // rcx: receiver
   // Check for abstract method error
   // Note: This should be done more efficiently via a throw_abstract_method_error
@@ -3116,9 +3149,39 @@
   __ should_not_reach_here();
 }
 
+void TemplateTable::invokehandle(int byte_no) {
+  transition(vtos, vtos);
+  assert(byte_no == f12_oop, "use this argument");
+  const Register rbx_method = rbx;  // (from f2)
+  const Register rax_mtype  = rax;  // (from f1)
+  const Register rcx_recv   = rcx;
+  const Register rdx_flags  = rdx;
+
+  if (!EnableInvokeDynamic) {
+    // rewriter does not generate this bytecode
+    __ should_not_reach_here();
+    return;
+  }
+
+  prepare_invoke(byte_no,
+                 rbx_method, rax_mtype,  // get f2 methodOop, f1 MethodType
+                 rcx_recv);
+  __ verify_oop(rbx_method);
+  __ verify_oop(rcx_recv);
+  __ null_check(rcx_recv);
+
+  // Note:  rax_mtype is already pushed (if necessary) by prepare_invoke
+
+  // FIXME: profile the LambdaForm also
+  __ profile_final_call(rax);
+
+  __ jump_from_interpreted(rbx_method, rdx);
+}
+
+
 void TemplateTable::invokedynamic(int byte_no) {
   transition(vtos, vtos);
-  assert(byte_no == f1_oop, "use this argument");
+  assert(byte_no == f12_oop, "use this argument");
 
   if (!EnableInvokeDynamic) {
     // We should not encounter this bytecode if !EnableInvokeDynamic.
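
The invokedynamic body continues in the next hunk; together with the new invokehandle above, it drops the old scheme of loading CallSite.target and jumping through a method-handle interpreter entry. Resolution now leaves a directly callable linker methodOop (e.g. MH.linkToCallSite) in f2 and the appendix oop in f1, so the template just pushes the appendix and jumps. A host-side sketch of the new shape, all names invented:

    typedef void (*FakeLinkerEntry)(void* appendix);

    static void dispatch_invokedynamic(FakeLinkerEntry f2_linker, void* f1_callsite) {
      // prepare_invoke has already pushed f1_callsite as a trailing argument;
      // the removed path instead loaded callsite->target, null-checked it and
      // jumped through a method-handle-specific interpreter entry.
      f2_linker(f1_callsite);   // stands in for jump_from_interpreted(f2 methodOop)
    }
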
@@ -3131,26 +3194,23 @@
     return;
   }
 
-  prepare_invoke(rax, rbx, byte_no);
-
-  // rax: CallSite object (f1)
-  // rbx: unused (f2)
-  // rcx: receiver address
-  // rdx: flags (unused)
-
-  Register rax_callsite      = rax;
-  Register rcx_method_handle = rcx;
+  const Register rbx_method   = rbx;
+  const Register rax_callsite = rax;
+
+  prepare_invoke(byte_no, rbx_method, rax_callsite);
+
+  // rax: CallSite object (from f1)
+  // rbx: MH.linkToCallSite method (from f2)
+
+  // Note:  rax_callsite is already pushed by prepare_invoke
 
   // %%% should make a type profile for any invokedynamic that takes a ref argument
   // profile this call
   __ profile_call(rsi);
 
   __ verify_oop(rax_callsite);
-  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
-  __ null_check(rcx_method_handle);
-  __ verify_oop(rcx_method_handle);
-  __ prepare_to_jump_from_interpreted();
-  __ jump_to_method_handle_entry(rcx_method_handle, rdx);
+
+  __ jump_from_interpreted(rbx_method, rdx);
 }
 
 //----------------------------------------------------------------------------------------------------