hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp
changeset 35214 d86005e0b4c2
parent 35211 3771329165d4
child 35215 f9536fc8548c
/*
 * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifdef SHARK
#include "shark/shark_globals.hpp"
#endif

#ifdef CC_INTERP

// This routine exists to make tracebacks look decent in the debugger
// while "shadow" interpreter frames are on the stack. It is also
// used to distinguish interpreter frames.

extern "C" void RecursiveInterpreterActivation(interpreterState istate) {
  ShouldNotReachHere();
}

bool CppInterpreter::contains(address pc) {
  return ( _code->contains(pc) ||
         ( pc == (CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset)));
}

#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#define __ _masm->
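
// Illustration (added note, not original code): STATE(_name) expands to the
// Lstate register plus the byte offset of a BytecodeInterpreter field, so
//
//   __ ld_ptr(STATE(_method), G5_method);
//
// is shorthand for
//
//   __ ld_ptr(Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, _method)), G5_method);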

Label frame_manager_entry; // C++ interpreter entry point; this label holds that entry point.

static address unctrap_frame_manager_entry  = NULL;

static address interpreter_return_address  = NULL;
static address deopt_frame_manager_return_atos  = NULL;
static address deopt_frame_manager_return_btos  = NULL;
static address deopt_frame_manager_return_itos  = NULL;
static address deopt_frame_manager_return_ltos  = NULL;
static address deopt_frame_manager_return_ftos  = NULL;
static address deopt_frame_manager_return_dtos  = NULL;
static address deopt_frame_manager_return_vtos  = NULL;

const Register prevState = G1_scratch;

void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  __ stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
#ifdef _LP64
  __ stx(O0, STATE(_native_lresult));
#else
  __ std(O0, STATE(_native_lresult));
#endif
}

void InterpreterGenerator::restore_native_result(void) {

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
#ifdef _LP64
  __ ldx(STATE(_native_lresult), O0);
#else
  __ ldd(STATE(_native_lresult), O0);
#endif
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                         break;
    case T_VOID   : /* nothing to do */                         break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
    case T_OBJECT :
      __ ld_ptr(STATE(_oop_temp), Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
  return entry;
}
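
// Usage note (added for illustration): callers obtain these generated stubs
// via AbstractInterpreter::result_handler(type) -- as done for T_OBJECT later
// in this file -- and jump to them after the native call returns.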

// Convert a tosca-based result to a C++ interpreter stack-based result.
// The result goes to the address in L1_scratch.

address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
  // A result is in the native abi result register from a native method call.
  // We need to return this result to the interpreter by pushing the result on the interpreter's
  // stack. This is relatively simple: the destination is in L1_scratch,
  // i.e. L1_scratch is the first free element on the stack. If we "push" a return value we must
  // adjust L1_scratch.
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN:
      // !0 => true; 0 => false
      __ subcc(G0, O0, G0);
      __ addc(G0, 0, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    // cannot use and3, 0xFFFF too big as immediate value!
    case T_CHAR   :
      __ sll(O0, 16, O0);
      __ srl(O0, 16, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_BYTE   :
      __ sll(O0, 24, O0);
      __ sra(O0, 24, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_SHORT  :
      __ sll(O0, 16, O0);
      __ sra(O0, 16, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;
    case T_LONG   :
#ifndef _LP64
#if defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted we just do a little
  // stupid shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
      __ stx(G1, L1_scratch, -wordSize);
#else
      // native result is in O0, O1
      __ st(O1, L1_scratch, 0);                      // Low order
      __ st(O0, L1_scratch, -wordSize);              // High order
#endif /* COMPILER2 */
#else
      __ stx(O0, L1_scratch, -wordSize);
#endif
      __ sub(L1_scratch, 2*wordSize, L1_scratch);
      break;

    case T_INT    :
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_VOID   : /* nothing to do */
      break;

    case T_FLOAT  :
      __ stf(FloatRegisterImpl::S, F0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_DOUBLE :
      // Every stack slot is aligned on 64 bits; however, is this
      // the correct stack slot on 64-bit?? QQQ
      __ stf(FloatRegisterImpl::D, F0, L1_scratch, -wordSize);
      __ sub(L1_scratch, 2*wordSize, L1_scratch);
      break;
    case T_OBJECT :
      __ verify_oop(O0);
      __ st_ptr(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();                          // return from interpreter activation
  __ delayed()->nop();                // schedule this better
  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result on the java expression stack of the caller.
  //
  // The current interpreter activation in Lstate is for the method just returning its
  // result. So we know that the result of this method is on the top of the current
  // execution stack (which is pre-pushed) and will be returned to the top of the caller's
  // stack. The top of the caller's stack is the bottom of the locals of the current
  // activation.
  // Because of the way activations are managed by the frame manager the value of esp is
  // below both the stack top of the current activation and naturally the stack top
  // of the calling activation. This enables this routine to leave the return address
  // to the frame manager on the stack and do a vanilla return.
  //
  // On entry: O0 - points to source (callee stack top)
  //           O1 - points to destination (caller stack top [i.e. free location])
  // destroys O2, O3
  //

  address entry = __ pc();
  switch (type) {
    case T_VOID:  break;
    case T_FLOAT  :
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      // 1 word result
      __ ld(O0, 0, O2);
      __ st(O2, O1, 0);
      __ sub(O1, wordSize, O1);
      break;
    case T_DOUBLE  :
    case T_LONG    :
      // return top two words on current expression stack to caller's expression stack
      // The caller's expression stack is adjacent to the current frame manager's interpreterState
      // except we allocated one extra word for this interpreterState so we won't overwrite it
      // when we return a two word result.
#ifdef _LP64
      __ ld_ptr(O0, 0, O2);
      __ st_ptr(O2, O1, -wordSize);
#else
      __ ld(O0, 0, O2);
      __ ld(O0, wordSize, O3);
      __ st(O3, O1, 0);
      __ st(O2, O1, -wordSize);
#endif
      __ sub(O1, 2*wordSize, O1);
      break;
    case T_OBJECT :
      __ ld_ptr(O0, 0, O2);
      __ verify_oop(O2);                                               // verify it
      __ st_ptr(O2, O1, 0);
      __ sub(O1, wordSize, O1);
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();
  __ delayed()->nop(); // QQ schedule this better
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result in the native abi that the caller expects.
  // We are in a new frame; registers we set must be in the caller (i.e. callstub) frame.
  //
  // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
  // frame manager except in this situation the caller is native code (c1/c2/call_stub)
  // and so rather than return the result onto the caller's java expression stack we return
  // the result in the expected location based on the native abi.
  // On entry: O0 - source (stack top)
  // On exit: result in expected output register
  // QQQ schedule this better

  address entry = __ pc();
  switch (type) {
    case T_VOID:  break;
    case T_FLOAT  :
      __ ldf(FloatRegisterImpl::S, O0, 0, F0);
      break;
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      // 1 word result
      __ ld(O0, 0, O0->after_save());
      break;
    case T_DOUBLE  :
      __ ldf(FloatRegisterImpl::D, O0, 0, F0);
      break;
    case T_LONG    :
      // return top two words on current expression stack to caller's expression stack
      // The caller's expression stack is adjacent to the current frame manager's interpreterState
      // except we allocated one extra word for this interpreterState so we won't overwrite it
      // when we return a two word result.
#ifdef _LP64
      __ ld_ptr(O0, 0, O0->after_save());
#else
      __ ld(O0, wordSize, O1->after_save());
      __ ld(O0, 0, O0->after_save());
#endif
#if defined(COMPILER2) && !defined(_LP64)
      // C2 expects long results in G1; we can't tell if we're returning to interpreted
      // or compiled code, so to be safe use both G1 and O0/O1

      // Shift bits into high (msb) of G1
      __ sllx(Otos_l1->after_save(), 32, G1);
      // Zero extend low bits
      __ srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
      __ or3 (Otos_l2->after_save(), G1, G1);
#endif /* COMPILER2 */
      break;
    case T_OBJECT :
      __ ld_ptr(O0, 0, O0->after_save());
      __ verify_oop(O0->after_save());                                               // verify it
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();
  __ delayed()->nop();
  return entry;
}

address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
  // make it look good in the debugger
  return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset;
}

address CppInterpreter::deopt_entry(TosState state, int length) {
  address ret = NULL;
  if (length != 0) {
    switch (state) {
      case atos: ret = deopt_frame_manager_return_atos; break;
      case btos: ret = deopt_frame_manager_return_btos; break;
      case ctos:
      case stos:
      case itos: ret = deopt_frame_manager_return_itos; break;
      case ltos: ret = deopt_frame_manager_return_ltos; break;
      case ftos: ret = deopt_frame_manager_return_ftos; break;
      case dtos: ret = deopt_frame_manager_return_dtos; break;
      case vtos: ret = deopt_frame_manager_return_vtos; break;
    }
  } else {
    ret = unctrap_frame_manager_entry;  // re-execute the bytecode (e.g. uncommon trap)
  }
  assert(ret != NULL, "Not initialized");
  return ret;
}
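
// Usage sketch (added note; the call shape is an assumption for illustration):
// given the definition above, a deoptimization that must re-execute the
// current bytecode, e.g. after an uncommon trap, resolves to
//
//   address continuation = CppInterpreter::deopt_entry(vtos, 0);  // == unctrap_frame_manager_entry
//
// while a non-zero length selects the deopt_frame_manager_return_* stub
// matching the tos state.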

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  Label done;
  const Register Rcounters = G3_scratch;

  __ ld_ptr(STATE(_method), G5_method);
  __ get_method_counters(G5_method, Rcounters, done);

  // Update standard invocation counters
  __ increment_invocation_counter(Rcounters, O0, G4_scratch);
  if (ProfileInterpreter) {
    Address interpreter_invocation_counter(Rcounters,
            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
    __ ld(interpreter_invocation_counter, G4_scratch);
    __ inc(G4_scratch);
    __ st(G4_scratch, interpreter_invocation_counter);
  }

  AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
  __ load_contents(invocation_limit, G3_scratch);
  __ cmp(O0, G3_scratch);
  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
  __ delayed()->nop();
  __ bind(done);
}
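
// For reference (added note, not generated code): the unsigned compare above
// implements the 'sticky' overflow test described earlier, roughly
//
//   if ((juint)counter >= (juint)InterpreterInvocationLimit) goto *overflow;
//
// so once the counter goes negative it compares as a huge unsigned value and
// keeps triggering the overflow path.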

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  // do nothing for empty methods (do not even increment invocation counter)
  if (UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);
    return entry;
  }
  return NULL;
}

address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // We need a routine that generates code to:
    //   * load the value in the referent field
    //   * pass that value to the pre-barrier.
    //
    // In the case of G1 this will record the value of the
    // referent in an SATB buffer if marking is active.
    // This will cause concurrent marking to mark the referent
    // field as live.
    Unimplemented();
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point.
  // Reference.get is an accessor.
  return NULL;
}

//
// Interpreter stub for calling a native method. (C++ interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  const Register RconstMethod = Gtmp1;
  const Address constMethod(G5_method, in_bytes(Method::const_offset()));
  const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));

  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address access_flags      (G5_method, in_bytes(Method::access_flags_offset()));

  Label Lentry;
  __ bind(Lentry);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(access_flags, Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  __ ld_ptr(constMethod, RconstMethod);
  __ lduh(size_of_parameters, Gtmp1);
  __ sll(Gtmp1, LogBytesPerWord, Gtmp2);       // parameter size in bytes
  __ add(Gargs, Gtmp2, Gargs);                 // points to first local + BytesPerWord
  // NEW
  __ add(Gargs, -wordSize, Gargs);             // points to first local[0]
  // generate the code to allocate the interpreter stack frame
  // NEW FRAME ALLOCATED HERE
  // save caller's original sp
  // __ mov(SP, I5_savedSP->after_restore());

  generate_compute_interpreter_state(Lstate, G0, true);

  // At this point Lstate points to new interpreter state
  //

  const Address do_not_unlock_if_synchronized(G2_thread,
      in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread local flag.
  // This flag has two effects: it forces an unwind in the topmost
  // interpreter frame, and it suppresses the unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);


  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  Label Lcontinue;
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);
  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check, so method is not locked
  // if counter overflows.

  if (synchronized) {
    lock_method();
    // Don't see how G2_thread is preserved here...
    // __ verify_thread(); QQQ destroys L0,L1 can't use
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld_ptr(STATE(_method), G5_method);
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

//   __ verify_thread(); kills L1,L2 can't use at the moment

  // jvmti/jvmpi support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler

  Label pending_exception_present;

  { Label L;
    __ ld_ptr(STATE(_method), G5_method);
    __ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false);
    __ ld_ptr(STATE(_method), G5_method);

    Address exception_addr(G2_thread, in_bytes(Thread::pending_exception_offset()));
    __ ld_ptr(exception_addr, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pn, pending_exception_present);
    __ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored there.
  // Copy a few locals across so the new frame has the variables
  // we need; these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lstate in particular)

  // Flush the state pointer to the register save area,
  // which is the only register we need for a stack walk.
  __ st_ptr(Lstate, SP, (Lstate->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);

  __ mov(Lstate, O1);         // Need to pass the state pointer across the frame

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  __ mov(I1, Lstate);          // In the "natural" register.

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).


  // call signature handler
  __ ld_ptr(STATE(_method), Lmethod);
  __ ld_ptr(STATE(_locals), Llocals);

  __ callr(G3_scratch, 0);
  __ delayed()->nop();
  __ ld_ptr(STATE(_thread), G2_thread);        // restore thread (shouldn't be needed)

  { Label not_static;

    __ ld_ptr(STATE(_method), G5_method);
    __ ld(access_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    __ delayed()->
      // get native function entry point (O0 is a good temp until the very end)
       ld_ptr(Address(G5_method, in_bytes(Method::native_function_offset())), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    __ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), O1);
    __ ld_ptr(Address(O1, in_bytes(ConstMethod::constants_offset())), O1);
    __ ld_ptr(Address(O1, ConstantPool::pool_holder_offset_in_bytes()), O1);
    __ ld_ptr(O1, mirror_offset, O1);
    // where the mirror handle body is allocated:
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, STATE(_oop_temp));
    __ add(STATE(_oop_temp), O1);            // this is really an LEA not an add
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the java frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //


  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, in_bytes(JavaThread::thread_state_offset()));
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);
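
  // Thread-state transitions around the native call (added summary of the
  // surrounding code, not new logic):
  //   _thread_in_Java -> _thread_in_native           (just above, before the call)
  //   _thread_in_native -> _thread_in_native_trans   (below, after the call returns)
  //   _thread_in_native_trans -> _thread_in_Java     (once any safepoint blocking is done)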

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ callr(O0, 0);
  __ delayed()->
     add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  __ ld_ptr(STATE(_thread), G2_thread);  // restore thread

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);


    Label L;
    Address suspend_state(G2_thread, in_bytes(JavaThread::suspend_flags_offset()));
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->
      ld(suspend_state, G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(noreg,
                    CAST_FROM_FN_PTR(address, JavaThread::check_safepoint_and_suspend_for_native_trans),
                    G2_thread);
    __ ld_ptr(STATE(_thread), G2_thread);  // restore thread
    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);


  // thread state is thread_in_native_trans. Any safepoint blocking has
  // happened in the trampoline, so we are ready to switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // If we have an oop result, store it where it will be safe for any further gc
  // until we return, now that we've released the handle that might have been protecting it.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, STATE(_oop_temp));

    __ bind(no_oop);

  }

  // reset handle block
  __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch);
  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));

    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ bind(pending_exception_present);
    // With the c++ interpreter we just leave the exception pending; the caller will do
    // the correct thing. However, like x86 we ignore the result of the native call and
    // leave the method locked, which seems wrong.

    __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame

    __ bind(L);
  }

  // jvmdi/jvmpi support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
    // Get the initial monitor we allocated
    __ sub(Lstate, entry_size, O1);                        // initial monitor
    __ unlock_object(O1);
    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell if we're returning to interpreted
  // or compiled code, so just be safe.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  // Calls result handler which POPS FRAME
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();

  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}

void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
                                                              const Register prev_state,
                                                              bool native) {

  // On entry
  // G5_method - caller's method
  // Gargs - points to initial parameters (i.e. locals[0])
  // G2_thread - valid? (C1 only??)
  // "prev_state" - contains any previous frame manager state to which we must save a link
  //
  // On return
  // "state" is a pointer to the newly allocated state object. We must allocate and initialize
  // a new interpreterState object and the method expression stack.

  assert_different_registers(state, prev_state);
  assert_different_registers(prev_state, G3_scratch);
  const Register Gtmp = G3_scratch;
  const Address constMethod       (G5_method, in_bytes(Method::const_offset()));
  const Address access_flags      (G5_method, in_bytes(Method::access_flags_offset()));

  // slop factor is two extra slots on the expression stack so that
  // we always have room to store a result when returning from a call without parameters
  // that returns a result.

  const int slop_factor = 2*wordSize;

  const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor?
                         Method::extra_stack_entries() + // extra stack for jsr 292
                         frame::memory_parameter_word_sp_offset +  // register save area + param window
                         (native ?  frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class

  // XXX G5_method valid

  // Now compute new frame size

  if (native) {
    const Register RconstMethod = Gtmp;
    const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh( size_of_parameters, Gtmp );
    __ calc_mem_param_words(Gtmp, Gtmp);     // space for native call parameters passed on the stack in words
  } else {
    // Full size expression stack
    __ ld_ptr(constMethod, Gtmp);
    __ lduh(Gtmp, in_bytes(ConstMethod::max_stack_offset()), Gtmp);
  }
  __ add(Gtmp, fixed_size, Gtmp);           // plus the fixed portion

  __ neg(Gtmp);                               // negative space for stack/parameters in words
  __ and3(Gtmp, -WordsPerLong, Gtmp);        // make multiple of 2 (SP must be 2-word aligned)
  __ sll(Gtmp, LogBytesPerWord, Gtmp);       // negative space for frame in bytes

  // Need to do stack size check here before we fault on large frames

  Label stack_ok;

  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);


  __ ld_ptr(G2_thread, in_bytes(Thread::stack_base_offset()), O0);
  __ ld_ptr(G2_thread, in_bytes(Thread::stack_size_offset()), O1);
  // compute stack bottom
  __ sub(O0, O1, O0);

  // Avoid touching the guard pages
  // Also a fudge for frame size of BytecodeInterpreter::run
  // It varies from 1k->4k depending on build type
  const int fudge = 6 * K;

  __ set(fudge + (max_pages * os::vm_page_size()), O1);

  __ add(O0, O1, O0);
  __ sub(O0, Gtmp, O0);
  __ cmp(SP, O0);
  __ brx(Assembler::greaterUnsigned, false, Assembler::pt, stack_ok);
  __ delayed()->nop();

  // throw exception; return address becomes throwing pc

  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ stop("never reached");

  __ bind(stack_ok);

  __ save(SP, Gtmp, SP);                      // setup new frame and register window

  // New window; I7 holds call_stub or previous activation.
  // O6 - register save area, BytecodeInterpreter just below it, args/locals just above that
  //
  __ sub(FP, sizeof(BytecodeInterpreter), state);        // Point to new Interpreter state
  __ add(state, STACK_BIAS, state );         // Account for 64bit bias
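
  // Rough frame picture at this point (added sketch; addresses grow downward,
  // offsets not to scale):
  //
  //   caller's frame ...
  //   args/locals                        <- Gargs
  //   BytecodeInterpreter (state)        <- FP - sizeof(BytecodeInterpreter) (+ STACK_BIAS)
  //   monitors / expression stack        (allocated below, grows down)
  //   register save area + param window  <- SP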

#define XXX_STATE(field_name) state, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))

  // Initialize a new Interpreter state
  // orig_sp - caller's original sp
  // G2_thread - thread
  // Gargs - &locals[0] (unbiased?)
  // G5_method - method
  // SP (biased) - accounts for full size java stack, BytecodeInterpreter object, register save area, and register parameter save window


  __ set(0xdead0004, O1);


  __ st_ptr(Gargs, XXX_STATE(_locals));
  __ st_ptr(G0, XXX_STATE(_oop_temp));

  __ st_ptr(state, XXX_STATE(_self_link));                // point to self
  __ st_ptr(prev_state->after_save(), XXX_STATE(_prev_link)); // Chain interpreter states
  __ st_ptr(G2_thread, XXX_STATE(_thread));               // Store javathread

  if (native) {
    __ st_ptr(G0, XXX_STATE(_bcp));
  } else {
    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), O2); // get ConstMethod*
    __ add(O2, in_bytes(ConstMethod::codes_offset()), O2);        // get bcp
    __ st_ptr(O2, XXX_STATE(_bcp));
  }

  __ st_ptr(G0, XXX_STATE(_mdx));
  __ st_ptr(G5_method, XXX_STATE(_method));

  __ set((int) BytecodeInterpreter::method_entry, O1);
  __ st(O1, XXX_STATE(_msg));

  __ ld_ptr(constMethod, O3);
  __ ld_ptr(O3, in_bytes(ConstMethod::constants_offset()), O3);
  __ ld_ptr(O3, ConstantPool::cache_offset_in_bytes(), O2);
  __ st_ptr(O2, XXX_STATE(_constants));

  __ st_ptr(G0, XXX_STATE(_result._to_call._callee));

  // Monitor base is just the start of the BytecodeInterpreter object.
  __ mov(state, O2);
  __ st_ptr(O2, XXX_STATE(_monitor_base));

  // Do we need a monitor for a synchronized method?
  {
    __ ld(access_flags, O1);
    Label done;
    Label got_obj;
    __ btst(JVM_ACC_SYNCHRONIZED, O1);
    __ br( Assembler::zero, false, Assembler::pt, done);

    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ delayed()->btst(JVM_ACC_STATIC, O1);
    __ ld_ptr(XXX_STATE(_locals), O1);
    __ br( Assembler::zero, true, Assembler::pt, got_obj);
    __ delayed()->ld_ptr(O1, 0, O1);                  // get receiver for not-static case
    __ ld_ptr(constMethod, O1);
    __ ld_ptr( O1, in_bytes(ConstMethod::constants_offset()), O1);
    __ ld_ptr( O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
    // lock the mirror, not the Klass*
    __ ld_ptr( O1, mirror_offset, O1);

    __ bind(got_obj);

  #ifdef ASSERT
    __ tst(O1);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  #endif // ASSERT

    const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
    __ sub(SP, entry_size, SP);                         // account for initial monitor
    __ sub(O2, entry_size, O2);                        // initial monitor
    __ st_ptr(O1, O2, BasicObjectLock::obj_offset_in_bytes()); // and allocate it for interpreter use
    __ bind(done);
  }
  1104 
       
  1105   // Remember initial frame bottom
       
  1106 
       
  1107   __ st_ptr(SP, XXX_STATE(_frame_bottom));
       
  1108 
       
  1109   __ st_ptr(O2, XXX_STATE(_stack_base));
       
  1110 
       
  1111   __ sub(O2, wordSize, O2);                    // prepush
       
  1112   __ st_ptr(O2, XXX_STATE(_stack));                // PREPUSH
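  // STATE(_stack) is kept "prepushed": it points at the next free slot, one
  // word below the top-of-stack value, which is why code elsewhere adds
  // wordSize back ("undo prepush") whenever it needs the first real entry.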

  // Full size expression stack
  __ ld_ptr(constMethod, O3);
  __ lduh(O3, in_bytes(ConstMethod::max_stack_offset()), O3);
  __ inc(O3, Method::extra_stack_entries());
  __ sll(O3, LogBytesPerWord, O3);
  __ sub(O2, O3, O3);
//  __ sub(O3, wordSize, O3);                    // so prepush doesn't look out of bounds
  __ st_ptr(O3, XXX_STATE(_stack_limit));

  if (!native) {
    //
    // Code to initialize locals
    //
    Register init_value = noreg;    // will be G0 if we must clear locals
    // Now zero locals
    if (true /* zerolocals */ || ClearInterpreterLocals) {
      // explicitly initialize locals
      init_value = G0;
    } else {
    #ifdef ASSERT
      // initialize locals to a garbage pattern for better debugging
      init_value = O3;
      __ set( 0x0F0F0F0F, init_value );
    #endif // ASSERT
    }
    if (init_value != noreg) {
      Label clear_loop;
      const Register RconstMethod = O1;
      const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
      const Address size_of_locals    (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));

      // NOTE: If you change the frame layout, this code will need to
      // be updated!
      __ ld_ptr( constMethod, RconstMethod );
      __ lduh( size_of_locals, O2 );
      __ lduh( size_of_parameters, O1 );
      __ sll( O2, LogBytesPerWord, O2);
      __ sll( O1, LogBytesPerWord, O1 );
      __ ld_ptr(XXX_STATE(_locals), L2_scratch);
      __ sub( L2_scratch, O2, O2 );
      __ sub( L2_scratch, O1, O1 );

      __ bind( clear_loop );
      __ inc( O2, wordSize );

      __ cmp( O2, O1 );
      __ br( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
      __ delayed()->st_ptr( init_value, O2, 0 );
    }
  }
}

// Find the preallocated monitor and lock the method (C++ interpreter)
//
void CppInterpreterGenerator::lock_method() {
// Lock the current method.
// Destroys registers L2_scratch, L3_scratch, O0
//
// Find everything relative to Lstate

#ifdef ASSERT
  __ ld_ptr(STATE(_method), L2_scratch);
  __ ld(L2_scratch, in_bytes(Method::access_flags_offset()), O0);

 { Label ok;
   __ btst(JVM_ACC_SYNCHRONIZED, O0);
   __ br( Assembler::notZero, false, Assembler::pt, ok);
   __ delayed()->nop();
   __ stop("method doesn't need synchronization");
   __ bind(ok);
  }
#endif // ASSERT

  // monitor is already allocated at stack base
  // and the lockee is already present
  __ ld_ptr(STATE(_stack_base), L2_scratch);
  __ ld_ptr(L2_scratch, BasicObjectLock::obj_offset_in_bytes(), O0);   // get object
  __ lock_object(L2_scratch, O0);

}

// Generate code for handling resuming a deopted method
void CppInterpreterGenerator::generate_deopt_handling() {

  Label return_from_deopt_common;

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_atos  = __ pc();

  // O0/O1 live
  __ ba(return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_OBJECT), L3_scratch);    // Result stub address array index


  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_btos  = __ pc();

  // O0/O1 live
  __ ba(return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_BOOLEAN), L3_scratch);    // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_itos  = __ pc();

  // O0/O1 live
  __ ba(return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_INT), L3_scratch);    // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)

  deopt_frame_manager_return_ltos  = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
  // build even if we are returning from interpreted we just do a little
  // stupid shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

  __ srl (G1, 0,O1);
  __ srlx(G1,32,O0);
#endif /* !_LP64 && COMPILER2 */
  // O0/O1 live
  __ ba(return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_LONG), L3_scratch);    // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)

  deopt_frame_manager_return_ftos  = __ pc();
  // O0/O1 live
  __ ba(return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_FLOAT), L3_scratch);    // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_dtos  = __ pc();

  // O0/O1 live
  __ ba(return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_DOUBLE), L3_scratch);    // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_vtos  = __ pc();

  // O0/O1 live
  __ set(AbstractInterpreter::BasicType_as_index(T_VOID), L3_scratch);

  // Deopt return common
  // an index is present that lets us move any possible result being
  // returned to the interpreter's stack
  //
  __ bind(return_from_deopt_common);

  // Result if any is in native abi result (O0..O1/F0..F1). The java expression
  // stack is in the state that the calling convention left it.
  // Copy the result from native abi result and place it on java expression stack.

  // Current interpreter state is present in Lstate

  // Get current pre-pushed top of interpreter stack
  // The result (if any) is in native abi
  // result type index is in L3_scratch

  __ ld_ptr(STATE(_stack), L1_scratch);                                          // get top of java expr stack

  __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch);
  __ sll(L3_scratch, LogBytesPerWord, L3_scratch);
  __ ld_ptr(L4_scratch, L3_scratch, Lscratch);                                   // get typed result converter address
  __ jmpl(Lscratch, G0, O7);                                                     // and convert it
  __ delayed()->nop();
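
  // i.e. Lscratch = CppInterpreter::_tosca_to_stack[result_type_index]; the
  // converter copies the native abi result (O0/O1 or F0/F1) onto the java
  // expression stack and returns through the O7 set by the jmpl above.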

  // L1_scratch points to top of stack (prepushed)
  __ st_ptr(L1_scratch, STATE(_stack));
}

// Generate the code to handle a more_monitors message from the c++ interpreter
void CppInterpreterGenerator::generate_more_monitors() {

  Label entry, loop;
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  // 1. compute new pointers                                // esp: old expression stack top
  __ delayed()->ld_ptr(STATE(_stack_base), L4_scratch);            // current expression stack bottom
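  // (This ld_ptr fills the delay slot of the frame manager's
  //  "br notEqual bad_msg": generate_more_monitors() is emitted at that
  //  branch's fall-through, which is why it begins with __ delayed()->.)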
  __ sub(L4_scratch, entry_size, L4_scratch);
  __ st_ptr(L4_scratch, STATE(_stack_base));

  __ sub(SP, entry_size, SP);                  // Grow stack
  __ st_ptr(SP, STATE(_frame_bottom));

  __ ld_ptr(STATE(_stack_limit), L2_scratch);
  __ sub(L2_scratch, entry_size, L2_scratch);
  __ st_ptr(L2_scratch, STATE(_stack_limit));

  __ ld_ptr(STATE(_stack), L1_scratch);                // Get current stack top
  __ sub(L1_scratch, entry_size, L1_scratch);
  __ st_ptr(L1_scratch, STATE(_stack));
  __ ba(entry);
  __ delayed()->add(L1_scratch, wordSize, L1_scratch);        // first real entry (undo prepush)

  // 2. move expression stack

  __ bind(loop);
  __ st_ptr(L3_scratch, Address(L1_scratch, 0));
  __ add(L1_scratch, wordSize, L1_scratch);
  __ bind(entry);
  __ cmp(L1_scratch, L4_scratch);
  __ br(Assembler::notEqual, false, Assembler::pt, loop);
  __ delayed()->ld_ptr(L1_scratch, entry_size, L3_scratch);

  // now zero the slot so we can find it.
  __ st_ptr(G0, L4_scratch, BasicObjectLock::obj_offset_in_bytes());

}

// Initial entry to C++ interpreter from the call_stub.
// This entry point is called the frame manager since it handles the generation
// of interpreter activation frames via requests directly from the vm (via call_stub)
// and via requests from the interpreter. The requests from the call_stub happen
// directly through the entry point. Requests from the interpreter happen via
// returning from the interpreter and examining the message the interpreter has
// returned to the frame manager. The frame manager can take the following requests:

// NO_REQUEST - error, should never happen.
// MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
//                 allocate a new monitor.
// CALL_METHOD - setup a new activation to call a new method. Very similar to what
//               happens during the entry via the call stub.
// RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
//
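// In outline, the request loop looks like this (an illustrative sketch in
// pseudo-C++, not the code emitted below):
//
//   state->_msg = BytecodeInterpreter::method_entry;
//   for (;;) {
//     BytecodeInterpreter::run(state);      // or runWithChecks under jvmti
//     switch (state->_msg) {
//       case BytecodeInterpreter::more_monitors:
//         /* grow frame, shuffle stack, _msg = got_monitors */    break;
//       case BytecodeInterpreter::call_method:
//         /* trim stack, build callee activation, re-dispatch */  break;
//       case BytecodeInterpreter::return_from_method:
//         /* pop activation, hand result to caller */             break;
//       case BytecodeInterpreter::throwing_exception:
//         /* unwind to a handler or forward to the caller */      break;
//     }
//   }
//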
// Arguments:
//
// G5_method: Method*
// receiver: unused (retrieved from stack as needed)
// prevState: previous frame manager state (NULL from the call_stub/c1/c2)
//
//
// Stack layout at entry
//
// [ return address     ] <--- esp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ]
//
//
// We are free to blow any registers we like because the call_stub which brought us here
// initially has preserved the callee save registers already.
//
//

static address interpreter_frame_manager = NULL;

#ifdef ASSERT
  #define VALIDATE_STATE(scratch, marker)                         \
  {                                                               \
    Label skip;                                                   \
    __ ld_ptr(STATE(_self_link), scratch);                        \
    __ cmp(Lstate, scratch);                                      \
    __ brx(Assembler::equal, false, Assembler::pt, skip);         \
    __ delayed()->nop();                                          \
    __ breakpoint_trap();                                         \
    __ emit_int32(marker);                                        \
    __ bind(skip);                                                \
  }
#else
  #define VALIDATE_STATE(scratch, marker)
#endif /* ASSERT */
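
// For example, VALIDATE_STATE(G3_scratch, 4) traps (with the literal marker 4
// emitted after the trap for identification) whenever Lstate->_self_link no
// longer points back at Lstate, making corrupted interpreter state objects
// easy to spot in a debugger.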

void CppInterpreterGenerator::adjust_callers_stack(Register args) {
//
// Adjust caller's stack so that all the locals can be contiguous with
// the parameters.
// Worries about stack overflow make this a pain.
//
// Destroys args, G3_scratch, O2
// In/Out O5_savedSP (sender's original SP)
//
//  assert_different_registers(state, prev_state);
  const Register Gtmp = G3_scratch;
  const Register RconstMethod = G3_scratch;
  const Register tmp = O2;
  const Address constMethod(G5_method, in_bytes(Method::const_offset()));
  const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
  const Address size_of_locals    (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));

  __ ld_ptr(constMethod, RconstMethod);
  __ lduh(size_of_parameters, tmp);
  __ sll(tmp, LogBytesPerWord, Gargs);       // parameter size in bytes
  __ add(args, Gargs, Gargs);                // points to first local + BytesPerWord
  // NEW
  __ add(Gargs, -wordSize, Gargs);           // points to first local[0]
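  // Resulting picture (a sketch; the stack grows toward lower addresses):
  //
  //   Gargs -> locals[0]                  (== first parameter, highest address)
  //            ...
  //            locals[size_of_locals-1]   (non-parameter locals below the params)
  //   SP    -> dropped below to cover the extra locals (plus the c2i slop)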
  // determine extra space for non-argument locals & adjust caller's SP
  // Gtmp: size of locals in words
  __ lduh(size_of_locals, Gtmp);
  __ compute_extra_locals_size_in_bytes(tmp, Gtmp, Gtmp);

#if 1
  // c2i adapters place the final interpreter argument in the register save area for O0/I0.
  // The call_stub will place the final interpreter argument at
  // frame::memory_parameter_word_sp_offset. This is mostly not noticeable for either asm
  // or c++ interpreter. However with the c++ interpreter when we do a recursive call
  // and try to make it look good in the debugger we will store the argument to
  // RecursiveInterpreterActivation in the register argument save area. Without allocating
  // extra space for the compiler this will overwrite locals in the local array of the
  // interpreter.
  // QQQ still needed with frameless adapters???

  const int c2i_adjust_words = frame::memory_parameter_word_sp_offset - frame::callee_register_argument_save_area_sp_offset;

  __ add(Gtmp, c2i_adjust_words*wordSize, Gtmp);
#endif // 1


  __ sub(SP, Gtmp, SP);                      // just caller's frame for the additional space we need.
}

address InterpreterGenerator::generate_normal_entry(bool synchronized) {

  // G5_method: Method*
  // G2_thread: thread (unused)
  // Gargs:   bottom of args (sender_sp)
  // O5: sender's sp

  // A single frame manager is plenty as we don't specialize for synchronized. We could, and
  // the code is pretty much ready. Would need to change the test below and for good measure
  // modify generate_interpreter_state to only do the (pre) sync stuff for synchronized
  // routines. Not clear this is worth it yet.

  if (interpreter_frame_manager) {
    return interpreter_frame_manager;
  }

  __ bind(frame_manager_entry);

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  // const Register Lmirror = L1;     // native mirror (native calls only)

  const Address constMethod       (G5_method, in_bytes(Method::const_offset()));
  const Address access_flags      (G5_method, in_bytes(Method::access_flags_offset()));

  address entry_point = __ pc();
  __ mov(G0, prevState);                                                 // no current activation


  Label re_dispatch;

  __ bind(re_dispatch);

  // Interpreter needs to have locals completely contiguous. In order to do that
  // we must adjust the caller's stack pointer for any locals beyond just the
  // parameters
  adjust_callers_stack(Gargs);

  // O5_savedSP still contains sender's sp

  // NEW FRAME

  generate_compute_interpreter_state(Lstate, prevState, false);

  // At this point a new interpreter frame and state object are created and initialized
  // Lstate has the pointer to the new activation
  // Any stack banging or limit check should already be done.

  Label call_interpreter;

  __ bind(call_interpreter);


#if 1
  __ set(0xdead002, Lmirror);
  __ set(0xdead002, L2_scratch);
  __ set(0xdead003, L3_scratch);
  __ set(0xdead004, L4_scratch);
  __ set(0xdead005, Lscratch);
  __ set(0xdead006, Lscratch2);
  __ set(0xdead007, L7_scratch);

  __ set(0xdeaf002, O2);
  __ set(0xdeaf003, O3);
  __ set(0xdeaf004, O4);
  __ set(0xdeaf005, O5);
#endif

  // Call interpreter (stack bang complete); enter here if the message is
  // set and we know the stack size is valid

  Label call_interpreter_2;

  __ bind(call_interpreter_2);

#ifdef ASSERT
  {
    Label skip;
    __ ld_ptr(STATE(_frame_bottom), G3_scratch);
    __ cmp(G3_scratch, SP);
    __ brx(Assembler::equal, false, Assembler::pt, skip);
    __ delayed()->nop();
    __ stop("SP not restored to frame bottom");
    __ bind(skip);
  }
#endif

  VALIDATE_STATE(G3_scratch, 4);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Lstate, O0);                 // (arg) pointer to current state

  __ call(CAST_FROM_FN_PTR(address,
                           JvmtiExport::can_post_interpreter_events() ?
                                                                  BytecodeInterpreter::runWithChecks
                                                                : BytecodeInterpreter::run),
         relocInfo::runtime_call_type);

  __ delayed()->nop();

  __ ld_ptr(STATE(_thread), G2_thread);
  __ reset_last_Java_frame();

  // examine msg from interpreter to determine next action
  __ ld_ptr(STATE(_thread), G2_thread);                                  // restore G2_thread

  __ ld(STATE(_msg), L1_scratch);                                        // Get new message

  Label call_method;
  Label return_from_interpreted_method;
  Label throw_exception;
  Label do_OSR;
  Label bad_msg;
  Label resume_interpreter;

  __ cmp(L1_scratch, (int)BytecodeInterpreter::call_method);
  __ br(Assembler::equal, false, Assembler::pt, call_method);
  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::return_from_method);
  __ br(Assembler::equal, false, Assembler::pt, return_from_interpreted_method);
  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::throwing_exception);
  __ br(Assembler::equal, false, Assembler::pt, throw_exception);
  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::do_osr);
  __ br(Assembler::equal, false, Assembler::pt, do_OSR);
  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::more_monitors);
  __ br(Assembler::notEqual, false, Assembler::pt, bad_msg);

  // Allocate more monitor space, shuffle expression stack....

  generate_more_monitors();

  // new monitor slot allocated, resume the interpreter.

  __ set((int)BytecodeInterpreter::got_monitors, L1_scratch);
  VALIDATE_STATE(G3_scratch, 5);
  __ ba(call_interpreter);
  __ delayed()->st(L1_scratch, STATE(_msg));

  // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
  unctrap_frame_manager_entry  = __ pc();

  // QQQ what message do we send

  __ ba(call_interpreter);
  __ delayed()->ld_ptr(STATE(_frame_bottom), SP);                  // restore to full stack frame

  //=============================================================================
  // Returning from a compiled method into a deopted method. The bytecode at the
  // bcp has completed. The result of the bytecode is in the native abi (the tosca
  // for the template based interpreter). Any stack space that was used by the
  // bytecode that has completed has been removed (e.g. parameters for an invoke)
  // so all that we have to do is place any pending result on the expression stack
  // and resume execution on the next bytecode.

  generate_deopt_handling();

  // ready to resume the interpreter

  __ set((int)BytecodeInterpreter::deopt_resume, L1_scratch);
  __ ba(call_interpreter);
  __ delayed()->st(L1_scratch, STATE(_msg));

  // Current frame has caught an exception we need to dispatch to the
  // handler. We can get here because a native interpreter frame caught
  // an exception in which case there is no handler and we must rethrow.
  // If it is a vanilla interpreted frame then we simply drop into the
  // interpreter and let it do the lookup.

  Interpreter::_rethrow_exception_entry = __ pc();

  Label return_with_exception;
  Label unwind_and_forward;

  // O0: exception
  // O7: throwing pc

  // We want exception in the thread no matter what we ultimately decide about frame type.

  Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
  __ verify_thread();
  __ st_ptr(O0, exception_addr);

  // get the Method*
  __ ld_ptr(STATE(_method), G5_method);

  // is this current frame vanilla or native?

  __ ld(access_flags, Gtmp1);
  __ btst(JVM_ACC_NATIVE, Gtmp1);
  __ br(Assembler::zero, false, Assembler::pt, return_with_exception);  // vanilla interpreted frame handle directly
  __ delayed()->nop();

  // We drop through to unwind a native interpreted frame with a pending exception.
  // We jump here for the initial interpreter frame with exception pending.
  // We unwind the current activation and forward it to our caller.

  __ bind(unwind_and_forward);

  // Unwind frame and jump to forward exception. Unwinding will place throwing pc in O7
  // as expected by forward_exception.

  __ restore(FP, G0, SP);                  // unwind interpreter state frame
  __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  __ delayed()->mov(I5_savedSP->after_restore(), SP);

  // Return point from a call which returns a result in the native abi
  // (c1/c2/jni-native). This result must be processed onto the java
  // expression stack.
  //
  // A pending exception may be present in which case there is no result present

  address return_from_native_method = __ pc();

  VALIDATE_STATE(G3_scratch, 6);

  // Result if any is in native abi result (O0..O1/F0..F1). The java expression
  // stack is in the state that the calling convention left it.
  // Copy the result from native abi result and place it on java expression stack.

  // Current interpreter state is present in Lstate

  // Exception pending?

  __ ld_ptr(STATE(_frame_bottom), SP);                             // restore to full stack frame
  __ ld_ptr(exception_addr, Lscratch);                             // get any pending exception
  __ tst(Lscratch);                                                // exception pending?
  __ brx(Assembler::notZero, false, Assembler::pt, return_with_exception);
  __ delayed()->nop();

  // Process the native abi result to java expression stack

  __ ld_ptr(STATE(_result._to_call._callee), L4_scratch);          // called method
  __ ld_ptr(STATE(_stack), L1_scratch);                            // get top of java expr stack
  // get parameter size
  __ ld_ptr(L4_scratch, in_bytes(Method::const_offset()), L2_scratch);
  __ lduh(L2_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), L2_scratch);
  __ sll(L2_scratch, LogBytesPerWord, L2_scratch);                 // parameter size in bytes
  __ add(L1_scratch, L2_scratch, L1_scratch);                      // stack destination for result
  __ ld(L4_scratch, in_bytes(Method::result_index_offset()), L3_scratch); // called method result type index

  // tosca is really just native abi
  __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch);
  __ sll(L3_scratch, LogBytesPerWord, L3_scratch);
  __ ld_ptr(L4_scratch, L3_scratch, Lscratch);                     // get typed result converter address
  __ jmpl(Lscratch, G0, O7);                                       // and convert it
  __ delayed()->nop();

  // L1_scratch points to top of stack (prepushed)

  __ ba(resume_interpreter);
  __ delayed()->mov(L1_scratch, O1);

  // An exception is being caught on return to a vanilla interpreter frame.
  // Empty the stack and resume interpreter

  __ bind(return_with_exception);

  __ ld_ptr(STATE(_frame_bottom), SP);                             // restore to full stack frame
  __ ld_ptr(STATE(_stack_base), O1);                               // empty java expression stack
  __ ba(resume_interpreter);
  __ delayed()->sub(O1, wordSize, O1);                             // account for prepush

  // Return from an interpreted method: we return a result appropriate to the caller
  // (i.e. "recursive" interpreter call, or native) and unwind this interpreter activation.
  // All monitors should be unlocked.

  __ bind(return_from_interpreted_method);

  VALIDATE_STATE(G3_scratch, 7);

  Label return_to_initial_caller;

  // Interpreted result is on the top of the completed activation expression stack.
  // We must return it to the top of the caller's stack if the caller was interpreted;
  // otherwise we convert to a native abi result and return to call_stub/c1/c2.
  // The caller's expression stack was truncated by the call; however, the current activation
  // has enough stuff on the stack that we have usable space there no matter what. The
  // other thing that makes it easy is that the top of the caller's stack is stored in STATE(_locals)
  // for the current activation

  __ ld_ptr(STATE(_prev_link), L1_scratch);
  __ ld_ptr(STATE(_method), L2_scratch);                               // get method just executed
  __ ld(L2_scratch, in_bytes(Method::result_index_offset()), L2_scratch);
  __ tst(L1_scratch);
  __ brx(Assembler::zero, false, Assembler::pt, return_to_initial_caller);
  __ delayed()->sll(L2_scratch, LogBytesPerWord, L2_scratch);

  // Copy result to caller's java stack

  __ set((intptr_t)CppInterpreter::_stack_to_stack, L4_scratch);
  __ ld_ptr(L4_scratch, L2_scratch, Lscratch);                          // get typed result converter address
  __ ld_ptr(STATE(_stack), O0);                                         // current top (prepushed)
  __ ld_ptr(STATE(_locals), O1);                                        // stack destination

  // O0 - will be source, O1 - will be destination (preserved)
  __ jmpl(Lscratch, G0, O7);                                            // and convert it
  __ delayed()->add(O0, wordSize, O0);                                  // get source (top of current expr stack)

  // O1 == &locals[0]

  // Result is now on caller's stack. Just unwind current activation and resume

  Label unwind_recursive_activation;


  __ bind(unwind_recursive_activation);

  // O1 == &locals[0] (really the caller's stacktop) for the activation now returning.
  // Returning to an interpreter method from a "recursive" interpreter call:
  // the result converter left O1 pointing to top of the (prepushed) java stack for the
  // method we are returning to. Now all we must do is unwind the state from the completed call

  // Must restore stack
  VALIDATE_STATE(G3_scratch, 8);

  // Return to interpreter method after a method call (interpreted/native/c1/c2) has completed.
  // Result if any is already on the caller's stack. All we must do now is remove the now dead
  // frame and tell the interpreter to resume.


  __ mov(O1, I1);                                                     // pass back new stack top across activation
  // POP FRAME HERE ==================================
  __ restore(FP, G0, SP);                                             // unwind interpreter state frame
  __ ld_ptr(STATE(_frame_bottom), SP);                                // restore to full stack frame


  // Resume the interpreter. The current frame contains the current interpreter
  // state object.
  //
  // O1 == new java stack pointer

  __ bind(resume_interpreter);
  VALIDATE_STATE(G3_scratch, 10);

  // A frame we have already used before, so no need to bang the stack; use the call_interpreter_2 entry

  __ set((int)BytecodeInterpreter::method_resume, L1_scratch);
  __ st(L1_scratch, STATE(_msg));
  __ ba(call_interpreter_2);
  __ delayed()->st_ptr(O1, STATE(_stack));

  // interpreter returning to native code (call_stub/c1/c2)
  // convert result and unwind initial activation
  // L2_scratch - scaled result type index

  __ bind(return_to_initial_caller);

  __ set((intptr_t)CppInterpreter::_stack_to_native_abi, L4_scratch);
  __ ld_ptr(L4_scratch, L2_scratch, Lscratch);                           // get typed result converter address
  __ ld_ptr(STATE(_stack), O0);                                          // current top (prepushed)
  __ jmpl(Lscratch, G0, O7);                                             // and convert it
  __ delayed()->add(O0, wordSize, O0);                                   // get source (top of current expr stack)

  Label unwind_initial_activation;
  __ bind(unwind_initial_activation);

  // RETURN TO CALL_STUB/C1/C2 code (result, if any, in I0..I1/F0..F1)
  // we can return here with an exception that wasn't handled by interpreted code
  // how does c1/c2 see it on return?

  // compute resulting sp before/after args popped depending upon calling convention
  // __ ld_ptr(STATE(_saved_sp), Gtmp1);
  //
  // POP FRAME HERE ==================================
  __ restore(FP, G0, SP);
  __ retl();
  __ delayed()->mov(I5_savedSP->after_restore(), SP);

  // OSR request: unwind the current frame, transfer to the OSR entry
  // and enter the OSR nmethod

  __ bind(do_OSR);
  Label remove_initial_frame;
  __ ld_ptr(STATE(_prev_link), L1_scratch);
  __ ld_ptr(STATE(_result._osr._osr_buf), G1_scratch);

  // We are going to pop this frame. Is there another interpreter frame underneath
  // it or is it callstub/compiled?

  __ tst(L1_scratch);
  __ brx(Assembler::zero, false, Assembler::pt, remove_initial_frame);
  __ delayed()->ld_ptr(STATE(_result._osr._osr_entry), G3_scratch);

  // Frame underneath is an interpreter frame, simply unwind
  // POP FRAME HERE ==================================
  __ restore(FP, G0, SP);                                             // unwind interpreter state frame
  __ mov(I5_savedSP->after_restore(), SP);

  // Since we are now calling native we need to change our "return address" from the
  // dummy RecursiveInterpreterActivation to a return from native

  __ set((intptr_t)return_from_native_method - 8, O7);

  __ jmpl(G3_scratch, G0, G0);
  __ delayed()->mov(G1_scratch, O0);

  __ bind(remove_initial_frame);

  // POP FRAME HERE ==================================
  __ restore(FP, G0, SP);
  __ mov(I5_savedSP->after_restore(), SP);
  __ jmpl(G3_scratch, G0, G0);
  __ delayed()->mov(G1_scratch, O0);

  // Call a new method. All we do is (temporarily) trim the expression stack,
  // push a return address to bring us back to here and leap to the new entry.
  // At this point we have a topmost frame that was allocated by the frame manager
  // which contains the current method interpreted state. We trim this frame
  // of excess java expression stack entries and then recurse.

  __ bind(call_method);

  // stack points to next free location and not top element on expression stack
  // method expects sp to be pointing to topmost element

  __ ld_ptr(STATE(_thread), G2_thread);
  __ ld_ptr(STATE(_result._to_call._callee), G5_method);


  // SP already takes into account the 2 extra words we use for slop
  // when we call a "static long no_params()" method. So if
  // we trim back sp by the amount of unused java expression stack
  // there will automagically be the 2 extra words we need.
  // We also have to worry about keeping SP aligned.

  __ ld_ptr(STATE(_stack), Gargs);
  __ ld_ptr(STATE(_stack_limit), L1_scratch);

  // compute the unused java stack size
  __ sub(Gargs, L1_scratch, L2_scratch);                       // compute unused space

  // Round down the unused space so that the stack is always 16-byte aligned
  // by making the unused space a multiple of the size of two longs.

  __ and3(L2_scratch, -2*BytesPerLong, L2_scratch);
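  // e.g. an unused gap of 0x38 bytes is masked down to 0x30: and3 with
  // -2*BytesPerLong (== -16) clears the low four bits, so the trim below
  // stays a multiple of 16 bytes.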

  // Now trim the stack
  __ add(SP, L2_scratch, SP);


  // Now point to the final argument (account for prepush)
  __ add(Gargs, wordSize, Gargs);
#ifdef ASSERT
  // Make sure we have space for the window
  __ sub(Gargs, SP, L1_scratch);
  __ cmp(L1_scratch, 16*wordSize);
  {
    Label skip;
    __ brx(Assembler::greaterEqual, false, Assembler::pt, skip);
    __ delayed()->nop();
    __ stop("killed stack");
    __ bind(skip);
  }
#endif // ASSERT

  // Create a new frame where we can store values that make it look like the interpreter
  // really recursed.

  // prepare to recurse or call specialized entry

  // First link the registers we need

  // make the pc look good in debugger
  __ set(CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation), O7);
  // argument too
  __ mov(Lstate, I0);

  // Record our sending SP
  __ mov(SP, O5_savedSP);

  __ ld_ptr(STATE(_result._to_call._callee_entry_point), L2_scratch);
  __ set((intptr_t) entry_point, L1_scratch);
  __ cmp(L1_scratch, L2_scratch);
  __ brx(Assembler::equal, false, Assembler::pt, re_dispatch);
  __ delayed()->mov(Lstate, prevState);                                // link activations

  // method uses specialized entry, push a return so we look like call stub setup
  // this path will handle the fact that the result is returned in registers and not
  // on the java stack.

  __ set((intptr_t)return_from_native_method - 8, O7);
  __ jmpl(L2_scratch, G0, G0);                               // Do specialized entry
  __ delayed()->nop();

  //
  // Bad Message from interpreter
  //
  __ bind(bad_msg);
  __ stop("Bad message from interpreter");

  // Interpreted method "returned" with an exception, pass it on...
  // Pass result, unwind activation and continue/return to interpreter/call_stub
  // We handle result (if any) differently based on return to interpreter or call_stub

  __ bind(throw_exception);
  __ ld_ptr(STATE(_prev_link), L1_scratch);
  __ tst(L1_scratch);
  __ brx(Assembler::zero, false, Assembler::pt, unwind_and_forward);
  __ delayed()->nop();

  __ ld_ptr(STATE(_locals), O1); // get result of popping callee's args
  __ ba(unwind_recursive_activation);
  __ delayed()->nop();

  interpreter_frame_manager = entry_point;
  return entry_point;
}

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : CppInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}


static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size weren't a multiple of what the
  // stack needs to be aligned to. We are given that the sp (fp) is already aligned by
  // the caller so we must ensure that it is properly aligned for our callee.
  //
  // The c++ interpreter always makes sure that we have enough extra space on the
  // stack at all times to deal with the "static long no_params()" method issue. This
  // is "slop_factor" here.
  const int slop_factor = 2;

  const int fixed_size = sizeof(BytecodeInterpreter)/wordSize +           // interpreter state object
                         frame::memory_parameter_word_sp_offset;   // register save area + param window
  return (round_to(max_stack +
                   slop_factor +
                   fixed_size +
                   monitor_size +
                   (callee_extra_locals * Interpreter::stackElementWords), WordsPerLong));

}
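
// Illustrative arithmetic only (made-up numbers, assuming a 32-bit build where
// WordsPerLong == 2 and stackElementWords == 1): with max_stack = 4,
// monitor_size = 2, fixed_size = 70 and callee_extra_locals = 3 this yields
// round_to(4 + 2 + 70 + 2 + 3, 2) = round_to(81, 2) = 82 words.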

int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  // See call_stub code
  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
                                 WordsPerLong);    // 7 + register save area

  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size    = method->is_synchronized() ?
                                1*frame::interpreter_frame_monitor_size() : 0;
  return size_activation_helper(method->max_locals(), method->max_stack(),
                                monitor_size) + call_stub_size;
}

void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
                                           frame* caller,
                                           frame* current,
                                           Method* method,
                                           intptr_t* locals,
                                           intptr_t* stack,
                                           intptr_t* stack_base,
                                           intptr_t* monitor_base,
                                           intptr_t* frame_bottom,
                                           bool is_top_frame
                                           )
{
  // What about any vtable?
  //
  to_fill->_thread = JavaThread::current();
  // This gets filled in later but make it something recognizable for now
  to_fill->_bcp = method->code_base();
  to_fill->_locals = locals;
  to_fill->_constants = method->constants()->cache();
  to_fill->_method = method;
  to_fill->_mdx = NULL;
  to_fill->_stack = stack;
  if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution()) {
    to_fill->_msg = deopt_resume2;
  } else {
    to_fill->_msg = method_resume;
  }
  to_fill->_result._to_call._bcp_advance = 0;
  to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
  to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
  to_fill->_prev_link = NULL;

  // Fill in the registers for the frame

  // Need to install _sender_sp. Actually not too hard in C++!
  // When the skeletal frames are laid out we fill in a value
  // for _sender_sp. That value is only correct for the oldest
  // skeletal frame constructed (because there is only a single
  // entry for "caller_adjustment"). While the skeletal frames
  // exist that is good enough. We correct that calculation
  // here and get all the frames correct.

  // to_fill->_sender_sp = locals - (method->size_of_parameters() - 1);

  *current->register_addr(Lstate) = (intptr_t) to_fill;
  // skeletal already places a useful value here and this doesn't account
  // for alignment so don't bother.
  // *current->register_addr(I5_savedSP) =     (intptr_t) locals - (method->size_of_parameters() - 1);

  if (caller->is_interpreted_frame()) {
    interpreterState prev  = caller->get_interpreterState();
    to_fill->_prev_link = prev;
    // Make the prev callee look proper
    prev->_result._to_call._callee = method;
    if (*prev->_bcp == Bytecodes::_invokeinterface) {
      prev->_result._to_call._bcp_advance = 5;
    } else {
      prev->_result._to_call._bcp_advance = 3;
    }
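    // (invokeinterface is a 5-byte bytecode: opcode, 2-byte constant pool
    //  index, a count byte and a zero byte; the other invokes handled here
    //  are opcode plus a 2-byte index, i.e. 3 bytes.)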
  }
  to_fill->_oop_temp = NULL;
  to_fill->_stack_base = stack_base;
  // Need +1 here because stack_base points to the word just above the first expr stack entry
  // and stack_limit is supposed to point to the word just below the last expr stack entry.
  // See generate_compute_interpreter_state.
  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
  to_fill->_monitor_base = (BasicObjectLock*) monitor_base;

  // sparc specific
  to_fill->_frame_bottom = frame_bottom;
  to_fill->_self_link = to_fill;
#ifdef ASSERT
  to_fill->_native_fresult = 123456.789;
  to_fill->_native_lresult = CONST64(0xdeadcafedeafcafe);
#endif
}

void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp) {
  istate->_last_Java_pc = (intptr_t*) last_Java_pc;
}

static int frame_size_helper(int max_stack,
                             int moncount,
                             int callee_param_size,
                             int callee_locals_size,
                             bool is_top_frame,
                             int& monitor_size,
                             int& full_frame_words) {
  int extra_locals_size = callee_locals_size - callee_param_size;
  monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
  full_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
  int short_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
  int frame_words = is_top_frame ? full_frame_words : short_frame_words;

  return frame_words;
}
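
// Note: full_frame_words and short_frame_words above are computed with
// identical arguments, so on this port is_top_frame does not actually change
// the result here.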
       
  2079 
       
  2080 int AbstractInterpreter::size_activation(int max_stack,
       
  2081                                          int tempcount,
       
  2082                                          int extra_args,
       
  2083                                          int moncount,
       
  2084                                          int callee_param_size,
       
  2085                                          int callee_locals_size,
       
  2086                                          bool is_top_frame) {
       
  2087   assert(extra_args == 0, "NEED TO FIX");
       
  2088   // NOTE: return size is in words not bytes
       
  2089   // Calculate the amount our frame will be adjust by the callee. For top frame
       
  2090   // this is zero.
       
  2091 
       
  2092   // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
       
  2093   // calculates the extra locals based on itself. Not what the callee does
       
  2094   // to it. So it ignores last_frame_adjust value. Seems suspicious as far
       
  2095   // as getting sender_sp correct.
       
  2096 
       
  2097   int unused_monitor_size = 0;
       
  2098   int unused_full_frame_words = 0;
       
  2099   return frame_size_helper(max_stack, moncount, callee_param_size, callee_locals_size, is_top_frame,
       
  2100                            unused_monitor_size, unused_full_frame_words);
       
  2101 }
       
void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount, // Number of slots on java expression stack in use
                                            int popframe_extra_args,
                                            int moncount,  // Number of active monitors
                                            int caller_actual_parameters,
                                            int callee_param_size,
                                            int callee_locals_size,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  assert(popframe_extra_args == 0, "NEED TO FIX");
  // NOTE: this code must exactly mimic what
  // InterpreterGenerator::generate_compute_interpreter_state() does as far
  // as allocating an interpreter frame.
  // Set up the method, locals, and monitors.
  // The frame interpreter_frame is guaranteed to be the right size,
  // as determined by a previous call to the size_activation() method.
  // It is also guaranteed to be walkable even though it is in a skeletal state.
  // NOTE: tempcount is the current size of the java expression stack. For the
  //       top-most frame we will allocate a full-sized expression stack and
  //       not the cut-back version that non-top frames have.

  int monitor_size = 0;
  int full_frame_words = 0;
  int frame_words = frame_size_helper(method->max_stack(), moncount, callee_param_size, callee_locals_size,
                                      is_top_frame, monitor_size, full_frame_words);

  /*
    We must now fill in all the pieces of the frame. This means both
    the interpreterState and the registers.
  */

  // MUCHO HACK

  intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
  // 'interpreter_frame->sp()' is unbiased, while 'frame_bottom' must be a biased value in 64-bit mode.
  assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
  frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);
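  // On 64-bit SPARC (the V9 ABI) STACK_BIAS is 2047, i.e. the memory a
  // biased stack pointer refers to sits 2047 bytes above the register
  // value; on 32-bit builds STACK_BIAS is 0 and the subtraction is a no-op.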
       
  /* Now fill in the interpreterState object */

  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));

  intptr_t* locals;

  // Calculate the position of locals[0]. This is painful because of stack
  // alignment (same as ia64). The problem is that we cannot simply compute
  // the location of locals from fp(): fp() accounts for the extra locals,
  // but it also accounts for aligning the stack, so from fp() alone we
  // cannot tell whether locals[0] itself was moved for alignment or whether
  // max_locals merely happened to cover the padding.
  // +2 for the static long no_params() issue.
       
  if (caller->is_interpreted_frame()) {
    // locals must agree with the caller, because they will be used to set
    // the caller's tos when we return.
    interpreterState prev = caller->get_interpreterState();
    // stack() is prepushed.
    locals = prev->stack() + method->size_of_parameters();
  } else {
    // Lay out the locals block in the caller, adjacent to the register
    // window save area.
    //
    // Compiled frames do not allocate a varargs area, which is why this if
    // statement is needed.
    //
    intptr_t* fp = interpreter_frame->fp();
    int local_words = method->max_locals() * Interpreter::stackElementWords;

    if (caller->is_compiled_frame()) {
      locals = fp + frame::register_save_words + local_words - 1;
    } else {
      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
    }

  }
  // END MUCHO HACK
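
  // Worked example (hypothetical numbers): locals are addressed downward,
  // with locals[0] at the highest address, which is what the
  // 'local_words - 1' term accounts for. With a compiled caller,
  // max_locals == 3 and one-word stack elements, local_words == 3 and
  //
  //   locals = fp + frame::register_save_words + 2;
  //
  // so the three local slots occupy the words just above the caller's
  // register window save area.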
       
  2180 
       
  2181   intptr_t* monitor_base = (intptr_t*) cur_state;
       
  2182   intptr_t* stack_base =  monitor_base - monitor_size;
       
  2183   /* +1 because stack is always prepushed */
       
  2184   intptr_t* stack = stack_base - (tempcount + 1);
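
  // Resulting picture (sketch; addresses decrease downward):
  //
  //   fp
  //   cur_state    = fp - sizeof(BytecodeInterpreter)  // interpreterState
  //   monitor_base = cur_state                         // monitors grow down
  //   stack_base   = monitor_base - monitor_size
  //   stack        = stack_base - (tempcount + 1)      // tos, prepushed
  //   frame_bottom                                     // lowest word (biased)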
       
  BytecodeInterpreter::layout_interpreterState(cur_state,
                                               caller,
                                               interpreter_frame,
                                               method,
                                               locals,
                                               stack,
                                               stack_base,
                                               monitor_base,
                                               frame_bottom,
                                               is_top_frame);

  BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
}

#endif // CC_INTERP