hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
changeset 2534 08dac9ce0cd7
child 3262 30d1c247fc25
       
     1 /*
       
     2  * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
       
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
       
     4  *
       
     5  * This code is free software; you can redistribute it and/or modify it
       
     6  * under the terms of the GNU General Public License version 2 only, as
       
     7  * published by the Free Software Foundation.
       
     8  *
       
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
       
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
       
    12  * version 2 for more details (a copy is included in the LICENSE file that
       
    13  * accompanied this code).
       
    14  *
       
    15  * You should have received a copy of the GNU General Public License version
       
    16  * 2 along with this work; if not, write to the Free Software Foundation,
       
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
       
    18  *
       
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
       
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
       
    21  * have any questions.
       
    22  *
       
    23  */
       
    24 
       
    25 #include "incls/_precompiled.incl"
       
    26 #include "incls/_methodHandles_x86.cpp.incl"
       
    27 
       
    28 #define __ _masm->
       
    29 
       
    30 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
       
    31                                                 address interpreted_entry) {
       
    32   // Just before the actual machine code entry point, allocate space
       
    33   // for a MethodHandleEntry::Data record, so that we can manage everything
       
    34   // from one base pointer.
       
    35   __ align(wordSize);
       
    36   address target = __ pc() + sizeof(Data);
       
    37   while (__ pc() < target) {
       
    38     __ nop();
       
    39     __ align(wordSize);
       
    40   }
       
    41 
       
    42   MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
       
    43   me->set_end_address(__ pc());         // set a temporary end_address
       
    44   me->set_from_interpreted_entry(interpreted_entry);
       
    45   me->set_type_checking_entry(NULL);
       
    46 
       
    47   return (address) me;
       
    48 }
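// Editor's sketch (an assumption based on MethodHandleEntry::Data in
// methodHandles.hpp, where data() is taken as (Data*)this - 1): the nop
// padding emitted above becomes the Data record, sitting just below the
// returned pointer, and the pointer itself is where stub code is emitted
// next:
//
//     ... | Data record (sizeof(Data) bytes of nops) | me -> stub code ...
//
// The temporary set_end_address(__ pc()) makes end_address() equal to me,
// which is exactly what finish_compiled_entry() asserts before recording
// the real end of the emitted code.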
       
    49 
       
    50 MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
       
    51                                                 address start_addr) {
       
    52   MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
       
    53   assert(me->end_address() == start_addr, "valid ME");
       
    54 
       
    55   // Fill in the real end_address:
       
    56   __ align(wordSize);
       
    57   me->set_end_address(__ pc());
       
    58 
       
    59   return me;
       
    60 }
       
    61 
       
    62 #ifdef ASSERT
       
    63 static void verify_argslot(MacroAssembler* _masm, Register rax_argslot,
       
    64                            const char* error_message) {
       
    65   // Verify that argslot lies within (rsp, rbp].
       
    66   Label L_ok, L_bad;
       
    67   __ cmpptr(rax_argslot, rbp);
       
    68   __ jcc(Assembler::above, L_bad);
       
    69   __ cmpptr(rsp, rax_argslot);
       
    70   __ jcc(Assembler::below, L_ok);
       
    71   __ bind(L_bad);
       
    72   __ stop(error_message);
       
    73   __ bind(L_ok);
       
    74 }
       
    75 #endif
       
    76 
       
    77 
       
    78 // Code generation
       
    79 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
       
    80   // rbx: methodOop
       
    81   // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
       
    82   // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
       
    83   // rdx: garbage temp, blown away
       
    84 
       
    85   Register rbx_method = rbx;
       
    86   Register rcx_recv   = rcx;
       
    87   Register rax_mtype  = rax;
       
    88   Register rdx_temp   = rdx;
       
    89 
       
    90   // emit WrongMethodType path first, to enable jccb back-branch from main path
       
    91   Label wrong_method_type;
       
    92   __ bind(wrong_method_type);
       
    93   __ push(rax_mtype);       // required mtype
       
    94   __ push(rcx_recv);        // bad mh (1st stacked argument)
       
    95   __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
       
    96 
       
    97   // here's where control starts out:
       
    98   __ align(CodeEntryAlignment);
       
    99   address entry_point = __ pc();
       
   100 
       
   101   // fetch the MethodType from the method handle into rax (the 'check' register)
       
   102   {
       
   103     Register tem = rbx_method;
       
   104     for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
       
   105       __ movptr(rax_mtype, Address(tem, *pchase));
       
   106       tem = rax_mtype;          // in case there is another indirection
       
   107     }
       
   108   }
       
   109   Register rbx_temp = rbx_method; // done with incoming methodOop
       
   110 
       
   111   // given the MethodType, find out where the MH argument is buried
       
   112   __ movptr(rdx_temp, Address(rax_mtype,
       
   113                               __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp)));
       
   114   __ movl(rdx_temp, Address(rdx_temp,
       
   115                             __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp)));
       
   116   __ movptr(rcx_recv, __ argument_address(rdx_temp));
       
   117 
       
   118   __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
       
   119   __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   120 
       
   121   return entry_point;
       
   122 }
       
   123 
       
   124 // Helper to insert argument slots into the stack.
       
   125 // arg_slots must be a multiple of stack_move_unit() and <= 0
       
   126 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
       
   127                                      RegisterOrConstant arg_slots,
       
   128                                      int arg_mask,
       
   129                                      Register rax_argslot,
       
   130                                      Register rbx_temp, Register rdx_temp) {
       
   131   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
       
   132                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
       
   133 
       
   134 #ifdef ASSERT
       
   135   verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
       
   136   if (arg_slots.is_register()) {
       
   137     Label L_ok, L_bad;
       
   138     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
       
   139     __ jcc(Assembler::greater, L_bad);
       
   140     __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
       
   141     __ jcc(Assembler::zero, L_ok);
       
   142     __ bind(L_bad);
       
   143     __ stop("assert arg_slots <= 0 and clear low bits");
       
   144     __ bind(L_ok);
       
   145   } else {
       
   146     assert(arg_slots.as_constant() <= 0, "");
       
   147     assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
       
   148   }
       
   149 #endif //ASSERT
       
   150 
       
   151 #ifdef _LP64
       
   152   if (arg_slots.is_register()) {
       
   153     // clean high bits of stack motion register (was loaded as an int)
       
   154     __ movslq(arg_slots.as_register(), arg_slots.as_register());
       
   155   }
       
   156 #endif
       
   157 
       
   158   // Make space on the stack for the inserted argument(s).
       
   159   // Then pull down everything shallower than rax_argslot.
       
   160   // The stacked return address gets pulled down with everything else.
       
   161   // That is, copy [rsp, argslot) downward by -size words.  In pseudo-code:
       
   162   //   rsp -= size;
       
   163   //   for (rdx = rsp + size; rdx < argslot; rdx++)
       
   164   //     rdx[-size] = rdx[0]
       
   165   //   argslot -= size;
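  // Editor's illustration (hypothetical numbers, 8-byte words): with
  // rsp = 0x1000, argslot = 0x1020 and arg_slots = -2, the lea drops rsp
  // to 0x0ff0, the loop copies the four words at [0x1000, 0x1020) down to
  // [0x0ff0, 0x1010), and the final lea rebases argslot to 0x1010, leaving
  // the two-word gap [0x1010, 0x1020) open for the inserted argument.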
       
   166   __ mov(rdx_temp, rsp);                        // source pointer for copy
       
   167   __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
       
   168   {
       
   169     Label loop;
       
   170     __ bind(loop);
       
   171     // pull one word down each time through the loop
       
   172     __ movptr(rbx_temp, Address(rdx_temp, 0));
       
   173     __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
       
   174     __ addptr(rdx_temp, wordSize);
       
   175     __ cmpptr(rdx_temp, rax_argslot);
       
   176     __ jcc(Assembler::less, loop);
       
   177   }
       
   178 
       
   179   // Now move the argslot down, to point to the opened-up space.
       
   180   __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
       
   181 
       
   182   if (TaggedStackInterpreter && arg_mask != _INSERT_NO_MASK) {
       
   183     // The caller has specified a bitmask of tags to put into the opened space.
       
   184     // This only works when the arg_slots value is an assembly-time constant.
       
   185     int constant_arg_slots = arg_slots.as_constant() / stack_move_unit();
       
   186     int tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
       
   187     for (int slot = 0; slot < constant_arg_slots; slot++) {
       
   188       BasicType slot_type   = ((arg_mask & (1 << slot)) == 0 ? T_OBJECT : T_INT);
       
   189       int       slot_offset = Interpreter::stackElementSize() * slot;
       
   190       Address   tag_addr(rax_argslot, slot_offset + tag_offset);
       
   191       __ movptr(tag_addr, frame::tag_for_basic_type(slot_type));
       
   192     }
       
   193     // Note that the new argument slots are tagged properly but contain
       
   194     // garbage at this point.  The value portions must be initialized
       
   195     // by the caller.  (Especially references!)
       
   196   }
       
   197 }
       
   198 
       
   199 // Helper to remove argument slots from the stack.
       
   200 // arg_slots must be a multiple of stack_move_unit() and >= 0
       
   201 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
       
   202                                     RegisterOrConstant arg_slots,
       
   203                                     Register rax_argslot,
       
   204                                     Register rbx_temp, Register rdx_temp) {
       
   205   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
       
   206                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
       
   207 
       
   208 #ifdef ASSERT
       
   209   {
       
   210     // Verify that [argslot..argslot+size) lies within (rsp, rbp).
       
   211     Label L_ok, L_bad;
       
   212     __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
       
   213     __ cmpptr(rbx_temp, rbp);
       
   214     __ jcc(Assembler::above, L_bad);
       
   215     __ cmpptr(rsp, rax_argslot);
       
   216     __ jcc(Assembler::below, L_ok);
       
   217     __ bind(L_bad);
       
   218     __ stop("deleted argument(s) must fall within current frame");
       
   219     __ bind(L_ok);
       
   220   }
       
   221   if (arg_slots.is_register()) {
       
   222     Label L_ok, L_bad;
       
   223     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
       
   224     __ jcc(Assembler::less, L_bad);
       
   225     __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
       
   226     __ jcc(Assembler::zero, L_ok);
       
   227     __ bind(L_bad);
       
   228     __ stop("assert arg_slots >= 0 and clear low bits");
       
   229     __ bind(L_ok);
       
   230   } else {
       
   231     assert(arg_slots.as_constant() >= 0, "");
       
   232     assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
       
   233   }
       
   234 #endif //ASSERT
       
   235 
       
   236 #ifdef _LP64
       
   237   if (false) {                  // not needed, since register is positive
       
   238     // clean high bits of stack motion register (was loaded as an int)
       
   239     if (arg_slots.is_register())
       
   240       __ movslq(arg_slots.as_register(), arg_slots.as_register());
       
   241   }
       
   242 #endif
       
   243 
       
   244   // Pull up everything shallower than rax_argslot.
       
   245   // Then remove the excess space on the stack.
       
   246   // The stacked return address gets pulled up with everything else.
       
   247   // That is, copy [rsp, argslot) upward by size words.  In pseudo-code:
       
   248   //   for (rdx = argslot-1; rdx >= rsp; --rdx)
       
   249   //     rdx[size] = rdx[0]
       
   250   //   argslot += size;
       
   251   //   rsp += size;
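  // Editor's illustration (hypothetical numbers, 8-byte words): with
  // rsp = 0x0ff0, argslot = 0x1010 and arg_slots = +2, the loop walks rdx
  // from 0x1008 down to 0x0ff0, copying each word up by two words into
  // [0x1000, 0x1020); the final leas then move rsp to 0x1000 and argslot
  // to 0x1020, so the two deleted slots at [0x1010, 0x1020) are gone.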
       
   252   __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
       
   253   {
       
   254     Label loop;
       
   255     __ bind(loop);
       
   256     // pull one word up each time through the loop
       
   257     __ movptr(rbx_temp, Address(rdx_temp, 0));
       
   258     __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
       
   259     __ addptr(rdx_temp, -wordSize);
       
   260     __ cmpptr(rdx_temp, rsp);
       
   261     __ jcc(Assembler::greaterEqual, loop);
       
   262   }
       
   263 
       
   264   // Now move the argslot up, to point to the just-copied block.
       
   265   __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
       
   266   // And adjust the argslot address to point at the deletion point.
       
   267   __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
       
   268 }
       
   269 
       
   270 #ifndef PRODUCT
       
   271 void trace_method_handle_stub(const char* adaptername,
       
   272                               oop mh,
       
   273                               intptr_t* entry_sp,
       
   274                               intptr_t* saved_sp) {
       
   275   // called as a leaf from native code: do not block the JVM!
       
   276   printf("MH %s "PTR_FORMAT" "PTR_FORMAT" "INTX_FORMAT"\n", adaptername, mh, entry_sp, entry_sp - saved_sp);
       
   277 }
       
   278 #endif //PRODUCT
       
   279 
       
   280 // Generate an "entry" field for a method handle.
       
   281 // This determines how the method handle will respond to calls.
       
   282 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
       
   283   // Here is the register state during an interpreted call,
       
   284   // as set up by generate_method_handle_interpreter_entry():
       
   285   // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
       
   286   // - rcx: receiver method handle
       
   287   // - rax: method handle type (only used by the check_mtype entry point)
       
   288   // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
       
   289   // - rdx: garbage temp, can blow away
       
   290 
       
   291   Register rcx_recv    = rcx;
       
   292   Register rax_argslot = rax;
       
   293   Register rbx_temp    = rbx;
       
   294   Register rdx_temp    = rdx;
       
   295 
       
   296   guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
       
   297 
       
   298   // some handy addresses
       
   299   Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );
       
   300 
       
   301   Address rcx_mh_vmtarget(    rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() );
       
   302   Address rcx_dmh_vmindex(    rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() );
       
   303 
       
   304   Address rcx_bmh_vmargslot(  rcx_recv, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes() );
       
   305   Address rcx_bmh_argument(   rcx_recv, sun_dyn_BoundMethodHandle::argument_offset_in_bytes() );
       
   306 
       
   307   Address rcx_amh_vmargslot(  rcx_recv, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes() );
       
   308   Address rcx_amh_argument(   rcx_recv, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes() );
       
   309   Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
       
   310   Address vmarg;                // __ argument_address(vmargslot)
       
   311 
       
   312   int tag_offset = -1;
       
   313   if (TaggedStackInterpreter) {
       
   314     tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
       
    315     assert(tag_offset == wordSize, "stack grows as expected");
       
   316   }
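  // Editor's note: with TaggedStackInterpreter each stack element is a
  // two-word (value, tag) pair, and the assert above records that the tag
  // word sits exactly one word above its value word; tag_offset is used
  // below to address a slot's tag, e.g. Address(slot_addr, tag_offset).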
       
   317 
       
   318   if (have_entry(ek)) {
       
   319     __ nop();                   // empty stubs make SG sick
       
   320     return;
       
   321   }
       
   322 
       
   323   address interp_entry = __ pc();
       
   324   if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
       
   325 
       
   326 #ifndef PRODUCT
       
   327   if (TraceMethodHandles) {
       
   328     __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi);
       
   329     __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
       
   330     // arguments:
       
   331     __ push(rsi);               // saved_sp
       
   332     __ push(rax);               // entry_sp
       
   333     __ push(rcx);               // mh
       
   334     __ push(rcx);
       
   335     __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek));
       
   336     __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 4);
       
   337     __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax);
       
   338   }
       
   339 #endif //PRODUCT
       
   340 
       
   341   switch ((int) ek) {
       
   342   case _check_mtype:
       
   343     {
       
   344       // this stub is special, because it requires a live mtype argument
       
   345       Register rax_mtype = rax;
       
   346 
       
   347       // emit WrongMethodType path first, to enable jccb back-branch
       
   348       Label wrong_method_type;
       
   349       __ bind(wrong_method_type);
       
   350       __ movptr(rdx_temp, ExternalAddress((address) &_entries[_wrong_method_type]));
       
   351       __ jmp(Address(rdx_temp, MethodHandleEntry::from_interpreted_entry_offset_in_bytes()));
       
   352       __ hlt();
       
   353 
       
   354       interp_entry = __ pc();
       
   355       __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
       
   356       // now rax_mtype is dead; subsequent stubs will use it as a temp
       
   357 
       
   358       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   359     }
       
   360     break;
       
   361 
       
   362   case _wrong_method_type:
       
   363     {
       
   364       // this stub is special, because it requires a live mtype argument
       
   365       Register rax_mtype = rax;
       
   366 
       
   367       interp_entry = __ pc();
       
   368       __ push(rax_mtype);       // required mtype
       
   369       __ push(rcx_recv);        // random mh (1st stacked argument)
       
   370       __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
       
   371     }
       
   372     break;
       
   373 
       
   374   case _invokestatic_mh:
       
   375   case _invokespecial_mh:
       
   376     {
       
   377       Register rbx_method = rbx_temp;
       
   378       __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop
       
   379       __ verify_oop(rbx_method);
       
   380       // same as TemplateTable::invokestatic or invokespecial,
       
   381       // minus the CP setup and profiling:
       
   382       if (ek == _invokespecial_mh) {
       
   383         // Must load & check the first argument before entering the target method.
       
   384         __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
       
   385         __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
       
   386         __ null_check(rcx_recv);
       
   387         __ verify_oop(rcx_recv);
       
   388       }
       
   389       __ jmp(rbx_method_fie);
       
   390     }
       
   391     break;
       
   392 
       
   393   case _invokevirtual_mh:
       
   394     {
       
   395       // same as TemplateTable::invokevirtual,
       
   396       // minus the CP setup and profiling:
       
   397 
       
   398       // pick out the vtable index and receiver offset from the MH,
       
   399       // and then we can discard it:
       
   400       __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
       
   401       Register rbx_index = rbx_temp;
       
   402       __ movl(rbx_index, rcx_dmh_vmindex);
       
   403       // Note:  The verifier allows us to ignore rcx_mh_vmtarget.
       
   404       __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
       
   405       __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
       
   406 
       
   407       // get receiver klass
       
   408       Register rax_klass = rax_argslot;
       
   409       __ load_klass(rax_klass, rcx_recv);
       
   410       __ verify_oop(rax_klass);
       
   411 
       
   412       // get target methodOop & entry point
       
   413       const int base = instanceKlass::vtable_start_offset() * wordSize;
       
   414       assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
       
   415       Address vtable_entry_addr(rax_klass,
       
   416                                 rbx_index, Address::times_ptr,
       
   417                                 base + vtableEntry::method_offset_in_bytes());
       
   418       Register rbx_method = rbx_temp;
       
    419       __ movptr(rbx_method, vtable_entry_addr);  // pointer-sized load; movl would truncate the methodOop on 64-bit
       
   420 
       
   421       __ verify_oop(rbx_method);
       
   422       __ jmp(rbx_method_fie);
       
   423     }
       
   424     break;
       
   425 
       
   426   case _invokeinterface_mh:
       
   427     {
       
   428       // same as TemplateTable::invokeinterface,
       
   429       // minus the CP setup and profiling:
       
   430 
       
   431       // pick out the interface and itable index from the MH.
       
   432       __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
       
   433       Register rdx_intf  = rdx_temp;
       
   434       Register rbx_index = rbx_temp;
       
   435       __ movptr(rdx_intf,  rcx_mh_vmtarget);
       
   436       __ movl(rbx_index,   rcx_dmh_vmindex);
       
   437       __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
       
   438       __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
       
   439 
       
   440       // get receiver klass
       
   441       Register rax_klass = rax_argslot;
       
   442       __ load_klass(rax_klass, rcx_recv);
       
   443       __ verify_oop(rax_klass);
       
   444 
       
   445       Register rcx_temp   = rcx_recv;
       
   446       Register rbx_method = rbx_index;
       
   447 
       
   448       // get interface klass
       
   449       Label no_such_interface;
       
   450       __ verify_oop(rdx_intf);
       
   451       __ lookup_interface_method(rax_klass, rdx_intf,
       
   452                                  // note: next two args must be the same:
       
   453                                  rbx_index, rbx_method,
       
   454                                  rcx_temp,
       
   455                                  no_such_interface);
       
   456 
       
   457       __ verify_oop(rbx_method);
       
   458       __ jmp(rbx_method_fie);
       
   459       __ hlt();
       
   460 
       
   461       __ bind(no_such_interface);
       
   462       // Throw an exception.
       
   463       // For historical reasons, it will be IncompatibleClassChangeError.
       
   464       __ should_not_reach_here(); // %%% FIXME NYI
       
   465     }
       
   466     break;
       
   467 
       
   468   case _bound_ref_mh:
       
   469   case _bound_int_mh:
       
   470   case _bound_long_mh:
       
   471   case _bound_ref_direct_mh:
       
   472   case _bound_int_direct_mh:
       
   473   case _bound_long_direct_mh:
       
   474     {
       
   475       bool direct_to_method = (ek >= _bound_ref_direct_mh);
       
   476       BasicType arg_type = T_ILLEGAL;
       
   477       if (ek == _bound_long_mh || ek == _bound_long_direct_mh) {
       
   478         arg_type = T_LONG;
       
   479       } else if (ek == _bound_int_mh || ek == _bound_int_direct_mh) {
       
   480         arg_type = T_INT;
       
   481       } else {
       
   482         assert(ek == _bound_ref_mh || ek == _bound_ref_direct_mh, "must be ref");
       
   483         arg_type = T_OBJECT;
       
   484       }
       
   485       int arg_slots = type2size[arg_type];
       
   486       int arg_mask  = (arg_type == T_OBJECT ? _INSERT_REF_MASK :
       
   487                        arg_slots == 1       ? _INSERT_INT_MASK :  _INSERT_LONG_MASK);
       
   488 
       
   489       // make room for the new argument:
       
   490       __ movl(rax_argslot, rcx_bmh_vmargslot);
       
   491       __ lea(rax_argslot, __ argument_address(rax_argslot));
       
   492       insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask,
       
   493                        rax_argslot, rbx_temp, rdx_temp);
       
   494 
       
   495       // store bound argument into the new stack slot:
       
   496       __ movptr(rbx_temp, rcx_bmh_argument);
       
   497       Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
       
   498       if (arg_type == T_OBJECT) {
       
   499         __ movptr(Address(rax_argslot, 0), rbx_temp);
       
   500       } else {
       
   501         __ load_sized_value(rbx_temp, prim_value_addr,
       
   502                             type2aelembytes(arg_type), is_signed_subword_type(arg_type));
       
   503         __ movptr(Address(rax_argslot, 0), rbx_temp);
       
   504 #ifndef _LP64
       
   505         if (arg_slots == 2) {
       
   506           __ movl(rbx_temp, prim_value_addr.plus_disp(wordSize));
       
   507           __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rbx_temp);
       
   508         }
       
   509 #endif //_LP64
       
    510         // (then fall through to the target dispatch below)
       
   511       }
       
   512 
       
   513       if (direct_to_method) {
       
   514         Register rbx_method = rbx_temp;
       
   515         __ movptr(rbx_method, rcx_mh_vmtarget);
       
   516         __ verify_oop(rbx_method);
       
   517         __ jmp(rbx_method_fie);
       
   518       } else {
       
   519         __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   520         __ verify_oop(rcx_recv);
       
   521         __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   522       }
       
   523     }
       
   524     break;
       
   525 
       
   526   case _adapter_retype_only:
       
   527     // immediately jump to the next MH layer:
       
   528     __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   529     __ verify_oop(rcx_recv);
       
   530     __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   531     // This is OK when all parameter types widen.
       
   532     // It is also OK when a return type narrows.
       
   533     break;
       
   534 
       
   535   case _adapter_check_cast:
       
   536     {
       
   537       // temps:
       
   538       Register rbx_klass = rbx_temp; // interesting AMH data
       
   539 
       
   540       // check a reference argument before jumping to the next layer of MH:
       
   541       __ movl(rax_argslot, rcx_amh_vmargslot);
       
   542       vmarg = __ argument_address(rax_argslot);
       
   543 
       
   544       // What class are we casting to?
       
   545       __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
       
   546       __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
       
   547 
       
   548       // get the new MH:
       
   549       __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   550       // (now we are done with the old MH)
       
   551 
       
   552       Label done;
       
   553       __ movptr(rdx_temp, vmarg);
       
    554       __ testptr(rdx_temp, rdx_temp);  // full-width null test; testl would ignore the upper half on 64-bit
       
   555       __ jcc(Assembler::zero, done);          // no cast if null
       
   556       __ load_klass(rdx_temp, rdx_temp);
       
   557 
       
   558       // live at this point:
       
   559       // - rbx_klass:  klass required by the target method
       
   560       // - rdx_temp:   argument klass to test
       
   561       // - rcx_recv:   method handle to invoke (after cast succeeds)
       
   562       __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);
       
   563 
       
   564       // If we get here, the type check failed!
       
   565       // Call the wrong_method_type stub, passing the failing argument type in rax.
       
   566       Register rax_mtype = rax_argslot;
       
   567       __ push(rbx_klass);       // missed klass (required type)
       
   568       __ push(rdx_temp);        // bad actual type (1st stacked argument)
       
   569       __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
       
   570 
       
   571       __ bind(done);
       
   572       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   573     }
       
   574     break;
       
   575 
       
   576   case _adapter_prim_to_prim:
       
   577   case _adapter_ref_to_prim:
       
   578     // handled completely by optimized cases
       
   579     __ stop("init_AdapterMethodHandle should not issue this");
       
   580     break;
       
   581 
       
   582   case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
       
   583 //case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
       
   584   case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
       
   585   case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
       
   586     {
       
   587       // perform an in-place conversion to int or an int subword
       
   588       __ movl(rax_argslot, rcx_amh_vmargslot);
       
   589       vmarg = __ argument_address(rax_argslot);
       
   590 
       
   591       switch (ek) {
       
   592       case _adapter_opt_i2i:
       
   593         __ movl(rdx_temp, vmarg);
       
   594         break;
       
   595       case _adapter_opt_l2i:
       
   596         {
       
   597           // just delete the extra slot; on a little-endian machine we keep the first
       
   598           __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
       
   599           remove_arg_slots(_masm, -stack_move_unit(),
       
   600                            rax_argslot, rbx_temp, rdx_temp);
       
   601           vmarg = Address(rax_argslot, -Interpreter::stackElementSize());
       
   602           __ movl(rdx_temp, vmarg);
       
   603         }
       
   604         break;
       
   605       case _adapter_opt_unboxi:
       
   606         {
       
   607           // Load the value up from the heap.
       
   608           __ movptr(rdx_temp, vmarg);
       
   609           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
       
   610 #ifdef ASSERT
       
   611           for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
       
   612             if (is_subword_type(BasicType(bt)))
       
   613               assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
       
   614           }
       
   615 #endif
       
   616           __ null_check(rdx_temp, value_offset);
       
   617           __ movl(rdx_temp, Address(rdx_temp, value_offset));
       
   618           // We load this as a word.  Because we are little-endian,
       
   619           // the low bits will be correct, but the high bits may need cleaning.
       
   620           // The vminfo will guide us to clean those bits.
       
   621         }
       
   622         break;
       
   623       default:
       
   624         assert(false, "");
       
   625       }
       
   626       goto finish_int_conversion;
       
   627     }
       
   628 
       
   629   finish_int_conversion:
       
   630     {
       
   631       Register rbx_vminfo = rbx_temp;
       
   632       __ movl(rbx_vminfo, rcx_amh_conversion);
       
   633       assert(CONV_VMINFO_SHIFT == 0, "preshifted");
       
   634 
       
   635       // get the new MH:
       
   636       __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   637       // (now we are done with the old MH)
       
   638 
       
   639       // original 32-bit vmdata word must be of this form:
       
   640       //    | MBZ:16 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
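      // Editor's illustration (assuming the vminfo byte is the number of
      // bits to discard, i.e. 32 minus the destination width): int->byte
      // uses a count of 24, so 0x000000ff becomes 0xff000000 after shll
      // and 0xffffffff (-1) after sarl on the signed path; int->char uses
      // 16 and the shrl path, so 0x0001ffff ends up as 0x0000ffff.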
       
   641       __ xchgl(rcx, rbx_vminfo);                // free rcx for shifts
       
   642       __ shll(rdx_temp /*, rcx*/);
       
   643       Label zero_extend, done;
       
   644       __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
       
   645       __ jcc(Assembler::zero, zero_extend);
       
   646 
       
   647       // this path is taken for int->byte, int->short
       
   648       __ sarl(rdx_temp /*, rcx*/);
       
   649       __ jmp(done);
       
   650 
       
   651       __ bind(zero_extend);
       
   652       // this is taken for int->char
       
   653       __ shrl(rdx_temp /*, rcx*/);
       
   654 
       
   655       __ bind(done);
       
   656       __ movptr(vmarg, rdx_temp);
       
   657       __ xchgl(rcx, rbx_vminfo);                // restore rcx_recv
       
   658 
       
   659       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   660     }
       
   661     break;
       
   662 
       
   663   case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
       
   664   case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
       
   665     {
       
   666       // perform an in-place int-to-long or ref-to-long conversion
       
   667       __ movl(rax_argslot, rcx_amh_vmargslot);
       
   668 
       
   669       // on a little-endian machine we keep the first slot and add another after
       
   670       __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
       
   671       insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
       
   672                        rax_argslot, rbx_temp, rdx_temp);
       
   673       Address vmarg1(rax_argslot, -Interpreter::stackElementSize());
       
   674       Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize());
       
   675 
       
   676       switch (ek) {
       
   677       case _adapter_opt_i2l:
       
   678         {
       
   679           __ movl(rdx_temp, vmarg1);
       
   680           __ sarl(rdx_temp, 31);  // __ extend_sign()
       
   681           __ movl(vmarg2, rdx_temp); // store second word
       
   682         }
       
   683         break;
       
   684       case _adapter_opt_unboxl:
       
   685         {
       
   686           // Load the value up from the heap.
       
   687           __ movptr(rdx_temp, vmarg1);
       
   688           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
       
   689           assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
       
   690           __ null_check(rdx_temp, value_offset);
       
   691           __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
       
   692           __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
       
   693           __ movl(vmarg1, rbx_temp);
       
   694           __ movl(vmarg2, rdx_temp);
       
   695         }
       
   696         break;
       
   697       default:
       
   698         assert(false, "");
       
   699       }
       
   700 
       
   701       __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   702       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   703     }
       
   704     break;
       
   705 
       
   706   case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
       
   707   case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
       
   708     {
       
   709       // perform an in-place floating primitive conversion
       
   710       __ movl(rax_argslot, rcx_amh_vmargslot);
       
   711       __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
       
   712       if (ek == _adapter_opt_f2d) {
       
   713         insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
       
   714                          rax_argslot, rbx_temp, rdx_temp);
       
   715       }
       
   716       Address vmarg(rax_argslot, -Interpreter::stackElementSize());
       
   717 
       
   718 #ifdef _LP64
       
   719       if (ek == _adapter_opt_f2d) {
       
   720         __ movflt(xmm0, vmarg);
       
   721         __ cvtss2sd(xmm0, xmm0);
       
   722         __ movdbl(vmarg, xmm0);
       
   723       } else {
       
   724         __ movdbl(xmm0, vmarg);
       
   725         __ cvtsd2ss(xmm0, xmm0);
       
   726         __ movflt(vmarg, xmm0);
       
   727       }
       
   728 #else //_LP64
       
   729       if (ek == _adapter_opt_f2d) {
       
   730         __ fld_s(vmarg);        // load float to ST0
       
    731         __ fstp_d(vmarg);       // store double (fstp_s would drop the widening)
       
   732       } else if (!TaggedStackInterpreter) {
       
   733         __ fld_d(vmarg);        // load double to ST0
       
   734         __ fstp_s(vmarg);       // store single
       
   735       } else {
       
   736         Address vmarg_tag = vmarg.plus_disp(tag_offset);
       
   737         Address vmarg2    = vmarg.plus_disp(Interpreter::stackElementSize());
       
   738         // vmarg2_tag does not participate in this code
       
   739         Register rbx_tag = rbx_temp;
       
   740         __ movl(rbx_tag, vmarg_tag); // preserve tag
       
   741         __ movl(rdx_temp, vmarg2); // get second word of double
       
   742         __ movl(vmarg_tag, rdx_temp); // align with first word
       
   743         __ fld_d(vmarg);        // load double to ST0
       
   744         __ movl(vmarg_tag, rbx_tag); // restore tag
       
   745         __ fstp_s(vmarg);       // store single
       
   746       }
       
   747 #endif //_LP64
       
   748 
       
   749       if (ek == _adapter_opt_d2f) {
       
   750         remove_arg_slots(_masm, -stack_move_unit(),
       
   751                          rax_argslot, rbx_temp, rdx_temp);
       
   752       }
       
   753 
       
   754       __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   755       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   756     }
       
   757     break;
       
   758 
       
   759   case _adapter_prim_to_ref:
       
   760     __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
       
   761     break;
       
   762 
       
   763   case _adapter_swap_args:
       
   764   case _adapter_rot_args:
       
   765     // handled completely by optimized cases
       
   766     __ stop("init_AdapterMethodHandle should not issue this");
       
   767     break;
       
   768 
       
   769   case _adapter_opt_swap_1:
       
   770   case _adapter_opt_swap_2:
       
   771   case _adapter_opt_rot_1_up:
       
   772   case _adapter_opt_rot_1_down:
       
   773   case _adapter_opt_rot_2_up:
       
   774   case _adapter_opt_rot_2_down:
       
   775     {
       
   776       int rotate = 0, swap_slots = 0;
       
   777       switch ((int)ek) {
       
   778       case _adapter_opt_swap_1:     swap_slots = 1; break;
       
   779       case _adapter_opt_swap_2:     swap_slots = 2; break;
       
   780       case _adapter_opt_rot_1_up:   swap_slots = 1; rotate++; break;
       
   781       case _adapter_opt_rot_1_down: swap_slots = 1; rotate--; break;
       
   782       case _adapter_opt_rot_2_up:   swap_slots = 2; rotate++; break;
       
   783       case _adapter_opt_rot_2_down: swap_slots = 2; rotate--; break;
       
   784       default: assert(false, "");
       
   785       }
       
   786 
       
   787       // the real size of the move must be doubled if TaggedStackInterpreter:
       
   788       int swap_bytes = (int)( swap_slots * Interpreter::stackElementWords() * wordSize );
       
   789 
       
   790       // 'argslot' is the position of the first argument to swap
       
   791       __ movl(rax_argslot, rcx_amh_vmargslot);
       
   792       __ lea(rax_argslot, __ argument_address(rax_argslot));
       
   793 
       
    794       // 'vminfo' is the second (destination) slot
       
   795       Register rbx_destslot = rbx_temp;
       
   796       __ movl(rbx_destslot, rcx_amh_conversion);
       
   797       assert(CONV_VMINFO_SHIFT == 0, "preshifted");
       
   798       __ andl(rbx_destslot, CONV_VMINFO_MASK);
       
   799       __ lea(rbx_destslot, __ argument_address(rbx_destslot));
       
   800       DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"));
       
   801 
       
   802       if (!rotate) {
       
   803         for (int i = 0; i < swap_bytes; i += wordSize) {
       
   804           __ movptr(rdx_temp, Address(rax_argslot , i));
       
   805           __ push(rdx_temp);
       
   806           __ movptr(rdx_temp, Address(rbx_destslot, i));
       
   807           __ movptr(Address(rax_argslot, i), rdx_temp);
       
   808           __ pop(rdx_temp);
       
   809           __ movptr(Address(rbx_destslot, i), rdx_temp);
       
   810         }
       
   811       } else {
       
   812         // push the first chunk, which is going to get overwritten
       
   813         for (int i = swap_bytes; (i -= wordSize) >= 0; ) {
       
   814           __ movptr(rdx_temp, Address(rax_argslot, i));
       
   815           __ push(rdx_temp);
       
   816         }
       
   817 
       
   818         if (rotate > 0) {
       
   819           // rotate upward
       
   820           __ subptr(rax_argslot, swap_bytes);
       
   821 #ifdef ASSERT
       
   822           {
       
   823             // Verify that argslot > destslot, by at least swap_bytes.
       
   824             Label L_ok;
       
   825             __ cmpptr(rax_argslot, rbx_destslot);
       
   826             __ jcc(Assembler::aboveEqual, L_ok);
       
   827             __ stop("source must be above destination (upward rotation)");
       
   828             __ bind(L_ok);
       
   829           }
       
   830 #endif
       
   831           // work argslot down to destslot, copying contiguous data upwards
       
   832           // pseudo-code:
       
   833           //   rax = src_addr - swap_bytes
       
   834           //   rbx = dest_addr
       
   835           //   while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
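          // Editor's illustration (hypothetical one-slot rotate, untagged,
          // 8-byte words): with destslot = 0x1000 holding D and argslot =
          // 0x1018 holding A (C and B in between at 0x1008 and 0x1010),
          // A is pushed, this loop slides B, C, D each one word higher,
          // and the pops below drop A into 0x1000, ending with A, D, C, B
          // at 0x1000, 0x1008, 0x1010, 0x1018.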
       
   836           Label loop;
       
   837           __ bind(loop);
       
   838           __ movptr(rdx_temp, Address(rax_argslot, 0));
       
   839           __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
       
   840           __ addptr(rax_argslot, -wordSize);
       
   841           __ cmpptr(rax_argslot, rbx_destslot);
       
   842           __ jcc(Assembler::aboveEqual, loop);
       
   843         } else {
       
   844           __ addptr(rax_argslot, swap_bytes);
       
   845 #ifdef ASSERT
       
   846           {
       
   847             // Verify that argslot < destslot, by at least swap_bytes.
       
   848             Label L_ok;
       
   849             __ cmpptr(rax_argslot, rbx_destslot);
       
   850             __ jcc(Assembler::belowEqual, L_ok);
       
   851             __ stop("source must be below destination (downward rotation)");
       
   852             __ bind(L_ok);
       
   853           }
       
   854 #endif
       
   855           // work argslot up to destslot, copying contiguous data downwards
       
   856           // pseudo-code:
       
   857           //   rax = src_addr + swap_bytes
       
   858           //   rbx = dest_addr
       
   859           //   while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
       
   860           Label loop;
       
   861           __ bind(loop);
       
   862           __ movptr(rdx_temp, Address(rax_argslot, 0));
       
   863           __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
       
   864           __ addptr(rax_argslot, wordSize);
       
   865           __ cmpptr(rax_argslot, rbx_destslot);
       
   866           __ jcc(Assembler::belowEqual, loop);
       
   867         }
       
   868 
       
   869         // pop the original first chunk into the destination slot, now free
       
   870         for (int i = 0; i < swap_bytes; i += wordSize) {
       
   871           __ pop(rdx_temp);
       
   872           __ movptr(Address(rbx_destslot, i), rdx_temp);
       
   873         }
       
   874       }
       
   875 
       
   876       __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   877       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   878     }
       
   879     break;
       
   880 
       
   881   case _adapter_dup_args:
       
   882     {
       
   883       // 'argslot' is the position of the first argument to duplicate
       
   884       __ movl(rax_argslot, rcx_amh_vmargslot);
       
   885       __ lea(rax_argslot, __ argument_address(rax_argslot));
       
   886 
       
    887       // 'stack_move' is the (negative) number of words to duplicate
       
   888       Register rdx_stack_move = rdx_temp;
       
   889       __ movl(rdx_stack_move, rcx_amh_conversion);
       
   890       __ sarl(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
       
   891 
       
   892       int argslot0_num = 0;
       
   893       Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
       
   894       assert(argslot0.base() == rsp, "");
       
   895       int pre_arg_size = argslot0.disp();
       
   896       assert(pre_arg_size % wordSize == 0, "");
       
   897       assert(pre_arg_size > 0, "must include PC");
       
   898 
       
   899       // remember the old rsp+1 (argslot[0])
       
   900       Register rbx_oldarg = rbx_temp;
       
   901       __ lea(rbx_oldarg, argslot0);
       
   902 
       
   903       // move rsp down to make room for dups
       
   904       __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));
       
   905 
       
   906       // compute the new rsp+1 (argslot[0])
       
   907       Register rdx_newarg = rdx_temp;
       
   908       __ lea(rdx_newarg, argslot0);
       
   909 
       
   910       __ push(rdi);             // need a temp
       
   911       // (preceding push must be done after arg addresses are taken!)
       
   912 
       
   913       // pull down the pre_arg_size data (PC)
       
   914       for (int i = -pre_arg_size; i < 0; i += wordSize) {
       
   915         __ movptr(rdi, Address(rbx_oldarg, i));
       
   916         __ movptr(Address(rdx_newarg, i), rdi);
       
   917       }
       
   918 
       
   919       // copy from rax_argslot[0...] down to new_rsp[1...]
       
   920       // pseudo-code:
       
   921       //   rbx = old_rsp+1
       
   922       //   rdx = new_rsp+1
       
   923       //   rax = argslot
       
   924       //   while (rdx < rbx) *rdx++ = *rax++
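      // Editor's illustration (hypothetical, untagged stack): to duplicate
      // two one-word arguments, stack_move is -2; rsp has dropped by two
      // words, the pre_arg_size bytes of PC data have slid down with it,
      // and this loop copies the two words starting at rax_argslot into
      // the two freshly exposed slots at the new argslot[0] and argslot[1].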
       
   925       Label loop;
       
   926       __ bind(loop);
       
   927       __ movptr(rdi, Address(rax_argslot, 0));
       
   928       __ movptr(Address(rdx_newarg, 0), rdi);
       
   929       __ addptr(rax_argslot, wordSize);
       
   930       __ addptr(rdx_newarg, wordSize);
       
   931       __ cmpptr(rdx_newarg, rbx_oldarg);
       
   932       __ jcc(Assembler::less, loop);
       
   933 
       
   934       __ pop(rdi);              // restore temp
       
   935 
       
   936       __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   937       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   938     }
       
   939     break;
       
   940 
       
   941   case _adapter_drop_args:
       
   942     {
       
   943       // 'argslot' is the position of the first argument to nuke
       
   944       __ movl(rax_argslot, rcx_amh_vmargslot);
       
   945       __ lea(rax_argslot, __ argument_address(rax_argslot));
       
   946 
       
   947       __ push(rdi);             // need a temp
       
   948       // (must do previous push after argslot address is taken)
       
   949 
       
    950       // 'stack_move' is the number of words to drop
       
   951       Register rdi_stack_move = rdi;
       
   952       __ movl(rdi_stack_move, rcx_amh_conversion);
       
   953       __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
       
   954       remove_arg_slots(_masm, rdi_stack_move,
       
   955                        rax_argslot, rbx_temp, rdx_temp);
       
   956 
       
   957       __ pop(rdi);              // restore temp
       
   958 
       
   959       __ movptr(rcx_recv, rcx_mh_vmtarget);
       
   960       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
   961     }
       
   962     break;
       
   963 
       
   964   case _adapter_collect_args:
       
   965     __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
       
   966     break;
       
   967 
       
   968   case _adapter_spread_args:
       
   969     // handled completely by optimized cases
       
   970     __ stop("init_AdapterMethodHandle should not issue this");
       
   971     break;
       
   972 
       
   973   case _adapter_opt_spread_0:
       
   974   case _adapter_opt_spread_1:
       
   975   case _adapter_opt_spread_more:
       
   976     {
       
   977       // spread an array out into a group of arguments
       
   978       int length_constant = -1;
       
   979       switch (ek) {
       
   980       case _adapter_opt_spread_0: length_constant = 0; break;
       
   981       case _adapter_opt_spread_1: length_constant = 1; break;
       
   982       }
       
   983 
       
   984       // find the address of the array argument
       
   985       __ movl(rax_argslot, rcx_amh_vmargslot);
       
   986       __ lea(rax_argslot, __ argument_address(rax_argslot));
       
   987 
       
   988       // grab some temps
       
   989       { __ push(rsi); __ push(rdi); }
       
   990       // (preceding pushes must be done after argslot address is taken!)
       
   991 #define UNPUSH_RSI_RDI \
       
   992       { __ pop(rdi); __ pop(rsi); }
       
   993 
       
    994       // rax_argslot points both to the array and to the first output arg
       
   995       vmarg = Address(rax_argslot, 0);
       
   996 
       
   997       // Get the array value.
       
   998       Register  rsi_array       = rsi;
       
   999       Register  rdx_array_klass = rdx_temp;
       
  1000       BasicType elem_type       = T_OBJECT;
       
  1001       int       length_offset   = arrayOopDesc::length_offset_in_bytes();
       
  1002       int       elem0_offset    = arrayOopDesc::base_offset_in_bytes(elem_type);
       
  1003       __ movptr(rsi_array, vmarg);
       
  1004       Label skip_array_check;
       
  1005       if (length_constant == 0) {
       
  1006         __ testptr(rsi_array, rsi_array);
       
  1007         __ jcc(Assembler::zero, skip_array_check);
       
  1008       }
       
  1009       __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
       
  1010       __ load_klass(rdx_array_klass, rsi_array);
       
  1011 
       
  1012       // Check the array type.
       
  1013       Register rbx_klass = rbx_temp;
       
  1014       __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
       
  1015       __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
       
  1016 
       
  1017       Label ok_array_klass, bad_array_klass, bad_array_length;
       
  1018       __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
       
  1019       // If we get here, the type check failed!
       
  1020       __ jmp(bad_array_klass);
       
  1021       __ bind(ok_array_klass);
       
  1022 
       
  1023       // Check length.
       
  1024       if (length_constant >= 0) {
       
  1025         __ cmpl(Address(rsi_array, length_offset), length_constant);
       
  1026       } else {
       
  1027         Register rbx_vminfo = rbx_temp;
       
  1028         __ movl(rbx_vminfo, rcx_amh_conversion);
       
  1029         assert(CONV_VMINFO_SHIFT == 0, "preshifted");
       
  1030         __ andl(rbx_vminfo, CONV_VMINFO_MASK);
       
  1031         __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
       
  1032       }
       
  1033       __ jcc(Assembler::notEqual, bad_array_length);
       
  1034 
       
  1035       Register rdx_argslot_limit = rdx_temp;
       
  1036 
       
  1037       // Array length checks out.  Now insert any required stack slots.
       
  1038       if (length_constant == -1) {
       
  1039         // Form a pointer to the end of the affected region.
       
  1040         __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
       
   1041         // 'stack_move' is the (negative) number of words to insert
       
  1042         Register rdi_stack_move = rdi;
       
  1043         __ movl(rdi_stack_move, rcx_amh_conversion);
       
  1044         __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
       
  1045         Register rsi_temp = rsi_array;  // spill this
       
  1046         insert_arg_slots(_masm, rdi_stack_move, -1,
       
  1047                          rax_argslot, rbx_temp, rsi_temp);
       
  1048         // reload the array (since rsi was killed)
       
  1049         __ movptr(rsi_array, vmarg);
       
  1050       } else if (length_constant > 1) {
       
  1051         int arg_mask = 0;
       
  1052         int new_slots = (length_constant - 1);
       
  1053         for (int i = 0; i < new_slots; i++) {
       
  1054           arg_mask <<= 1;
       
  1055           arg_mask |= _INSERT_REF_MASK;
       
  1056         }
       
  1057         insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
       
  1058                          rax_argslot, rbx_temp, rdx_temp);
       
  1059       } else if (length_constant == 1) {
       
  1060         // no stack resizing required
       
  1061       } else if (length_constant == 0) {
       
  1062         remove_arg_slots(_masm, -stack_move_unit(),
       
  1063                          rax_argslot, rbx_temp, rdx_temp);
       
  1064       }
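      // Editor's note (illustrative): spreading a 3-element Object[] needs
      // three argument slots in place of the one that held the array, so
      // the generic (length_constant == -1) path inserts two slots here,
      // the constant paths do the same arithmetic at assembly time, and
      // the copy loop below then writes one element per
      // Interpreter::stackElementSize() step.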
       
  1065 
       
  1066       // Copy from the array to the new slots.
       
  1067       // Note: Stack change code preserves integrity of rax_argslot pointer.
       
  1068       // So even after slot insertions, rax_argslot still points to first argument.
       
  1069       if (length_constant == -1) {
       
  1070         // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
       
  1071         Register rsi_source = rsi_array;
       
  1072         __ lea(rsi_source, Address(rsi_array, elem0_offset));
       
  1073         Label loop;
       
  1074         __ bind(loop);
       
  1075         __ movptr(rbx_temp, Address(rsi_source, 0));
       
  1076         __ movptr(Address(rax_argslot, 0), rbx_temp);
       
  1077         __ addptr(rsi_source, type2aelembytes(elem_type));
       
  1078         if (TaggedStackInterpreter) {
       
  1079           __ movptr(Address(rax_argslot, tag_offset),
       
  1080                     frame::tag_for_basic_type(elem_type));
       
  1081         }
       
  1082         __ addptr(rax_argslot, Interpreter::stackElementSize());
       
  1083         __ cmpptr(rax_argslot, rdx_argslot_limit);
       
  1084         __ jcc(Assembler::less, loop);
       
  1085       } else if (length_constant == 0) {
       
  1086         __ bind(skip_array_check);
       
  1087         // nothing to copy
       
  1088       } else {
       
  1089         int elem_offset = elem0_offset;
       
  1090         int slot_offset = 0;
       
  1091         for (int index = 0; index < length_constant; index++) {
       
  1092           __ movptr(rbx_temp, Address(rsi_array, elem_offset));
       
  1093           __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
       
  1094           elem_offset += type2aelembytes(elem_type);
       
  1095           if (TaggedStackInterpreter) {
       
  1096             __ movptr(Address(rax_argslot, slot_offset + tag_offset),
       
  1097                       frame::tag_for_basic_type(elem_type));
       
  1098           }
       
  1099           slot_offset += Interpreter::stackElementSize();
       
  1100         }
       
  1101       }
       
  1102 
       
  1103       // Arguments are spread.  Move to next method handle.
       
  1104       UNPUSH_RSI_RDI;
       
  1105       __ movptr(rcx_recv, rcx_mh_vmtarget);
       
  1106       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       
  1107 
       
  1108       __ bind(bad_array_klass);
       
  1109       UNPUSH_RSI_RDI;
       
  1110       __ stop("bad array klass NYI");
       
  1111 
       
  1112       __ bind(bad_array_length);
       
  1113       UNPUSH_RSI_RDI;
       
  1114       __ stop("bad array length NYI");
       
  1115 
       
  1116 #undef UNPUSH_RSI_RDI
       
  1117     }
       
  1118     break;
       
  1119 
       
  1120   case _adapter_flyby:
       
  1121   case _adapter_ricochet:
       
  1122     __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
       
  1123     break;
       
  1124 
       
  1125   default:  ShouldNotReachHere();
       
  1126   }
       
  1127   __ hlt();
       
  1128 
       
  1129   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
       
  1130   __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
       
  1131 
       
  1132   init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
       
  1133 }