hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
changeset 46381 020219e46c86
parent 46369 3bf4544bec14
child 46461 7155d5ff2d07
46380:4a51438196cf 46381:020219e46c86
    55 // Size of interpreter code.  Increase if too small.  Interpreter will
    55 // Size of interpreter code.  Increase if too small.  Interpreter will
    56 // fail with a guarantee ("not enough space for interpreter generation");
    56 // fail with a guarantee ("not enough space for interpreter generation");
    57 // if too small.
    57 // if too small.
    58 // Run with +PrintInterpreter to get the VM to print out the size.
    58 // Run with +PrintInterpreter to get the VM to print out the size.
    59 // Max size with JVMTI
    59 // Max size with JVMTI
    60 #ifdef _LP64
    60 // The sethi() instruction generates lots more instructions when shell
    61   // The sethi() instruction generates lots more instructions when shell
    61 // stack limit is unlimited, so that's why this is much bigger.
    62   // stack limit is unlimited, so that's why this is much bigger.
       
    63 int TemplateInterpreter::InterpreterCodeSize = 260 * K;
    62 int TemplateInterpreter::InterpreterCodeSize = 260 * K;
    64 #else
       
    65 int TemplateInterpreter::InterpreterCodeSize = 230 * K;
       
    66 #endif
       
    67 
    63 
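As the comment above says, the generated size can be checked against the 260 * K budget by having the VM print the interpreter. On product builds PrintInterpreter is normally gated behind the diagnostic-flags switch, so an invocation would look roughly like this (exact flag gating varies by JDK version):

    java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version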
    68 // Generation of Interpreter
    64 // Generation of Interpreter
    69 //
    65 //
    70 // The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.
    66 // The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.
    71 
    67 
    73 #define __ _masm->
    69 #define __ _masm->
    74 
    70 
    75 
    71 
    76 //----------------------------------------------------------------------------------------------------
    72 //----------------------------------------------------------------------------------------------------
    77 
    73 
    78 #ifndef _LP64
       
    79 address TemplateInterpreterGenerator::generate_slow_signature_handler() {
       
    80   address entry = __ pc();
       
    81   Argument argv(0, true);
       
    82 
       
    83   // We are in the jni transition frame. Save the last_java_frame corresponding to the
       
    84   // outer interpreter frame
       
    85   //
       
    86   __ set_last_Java_frame(FP, noreg);
       
    87   // make sure the interpreter frame we've pushed has a valid return pc
       
    88   __ mov(O7, I7);
       
    89   __ mov(Lmethod, G3_scratch);
       
    90   __ mov(Llocals, G4_scratch);
       
    91   __ save_frame(0);
       
    92   __ mov(G2_thread, L7_thread_cache);
       
    93   __ add(argv.address_in_frame(), O3);
       
    94   __ mov(G2_thread, O0);
       
    95   __ mov(G3_scratch, O1);
       
    96   __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
       
    97   __ delayed()->mov(G4_scratch, O2);
       
    98   __ mov(L7_thread_cache, G2_thread);
       
    99   __ reset_last_Java_frame();
       
   100 
       
   101   // load the register arguments (the C code packed them as varargs)
       
   102   for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
       
   103       __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
       
   104   }
       
   105   __ ret();
       
   106   __ delayed()->
       
   107      restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
       
   108   return entry;
       
   109 }
       
   110 
       
   111 
       
   112 #else
       
   113 // LP64 passes floating point arguments in F1, F3, F5, etc. instead of
    74 // LP64 passes floating point arguments in F1, F3, F5, etc. instead of
   114 // O0, O1, O2 etc..
    75 // O0, O1, O2 etc..
   115 // Doubles are passed in D0, D2, D4
    76 // Doubles are passed in D0, D2, D4
   116 // We store the signature of the first 16 arguments in the first argument
    77 // We store the signature of the first 16 arguments in the first argument
   117 // slot because it will be overwritten prior to calling the native
    78 // slot because it will be overwritten prior to calling the native
   204   __ ret();
   165   __ ret();
   205   __ delayed()->
   166   __ delayed()->
   206      restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
   167      restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
   207   return entry;
   168   return entry;
   208 }
   169 }
   209 #endif
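A rough sketch of the argument mapping the comment at the top of this handler describes, for the first few register-passed slots (illustrative only; the full slot assignment, including the JNIEnv and receiver/class arguments, is done by the elided body of generate_slow_signature_handler):

    slot 0: int/pointer -> O0,  float -> F1,  double -> D0
    slot 1: int/pointer -> O1,  float -> F3,  double -> D2
    slot 2: int/pointer -> O2,  float -> F5,  double -> D4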
       
   210 
   170 
   211 void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
   171 void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
   212 
   172 
   213   // Generate code to initiate compilation on the counter overflow.
   173   // Generate code to initiate compilation on the counter overflow.
   214 
   174 
   251   // result potentially in F0/F1: save it across calls
   211   // result potentially in F0/F1: save it across calls
   252   const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
   212   const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
   253 
   213 
   254   // save and restore any potential method result value around the unlocking operation
   214   // save and restore any potential method result value around the unlocking operation
   255   __ stf(FloatRegisterImpl::D, F0, d_tmp);
   215   __ stf(FloatRegisterImpl::D, F0, d_tmp);
   256 #ifdef _LP64
       
   257   __ stx(O0, l_tmp);
   216   __ stx(O0, l_tmp);
   258 #else
       
   259   __ std(O0, l_tmp);
       
   260 #endif
       
   261 }
   217 }
   262 
   218 
   263 void TemplateInterpreterGenerator::restore_native_result(void) {
   219 void TemplateInterpreterGenerator::restore_native_result(void) {
   264   const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
   220   const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
   265   const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
   221   const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
   266 
   222 
   267   // Restore any method result value
   223   // Restore any method result value
   268   __ ldf(FloatRegisterImpl::D, d_tmp, F0);
   224   __ ldf(FloatRegisterImpl::D, d_tmp, F0);
   269 #ifdef _LP64
       
   270   __ ldx(l_tmp, O0);
   225   __ ldx(l_tmp, O0);
   271 #else
       
   272   __ ldd(l_tmp, O0);
       
   273 #endif
       
   274 }
   226 }
   275 
   227 
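The #ifndef _LP64 branches deleted above reflect how a 64-bit native result was held on 32-bit SPARC: the value occupied the O0/O1 register pair (O0 = high word, O1 = low word, matching the split shown further down), so a single std/ldd moved both registers in one 64-bit memory access. With only the 64-bit build left, the whole result sits in O0 and one stx/ldx suffices; float and double results are covered by the stf/ldf of F0 either way.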
   276 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
   228 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
   277   assert(!pass_oop || message == NULL, "either oop or message but not both");
   229   assert(!pass_oop || message == NULL, "either oop or message but not both");
   278   address entry = __ pc();
   230   address entry = __ pc();
   337   address entry = __ pc();
   289   address entry = __ pc();
   338 
   290 
   339   if (state == atos) {
   291   if (state == atos) {
   340     __ profile_return_type(O0, G3_scratch, G1_scratch);
   292     __ profile_return_type(O0, G3_scratch, G1_scratch);
   341   }
   293   }
   342 
       
   343 #if !defined(_LP64) && defined(COMPILER2)
       
   344   // All return values are where we want them, except for Longs.  C2 returns
       
   345   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
       
   346   // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
       
   347   // build even if we are returning from interpreted we just do a little
       
    348   // stupid shuffling.
       
   349   // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
       
    350   // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
       
   351   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
       
   352 
       
   353   if (state == ltos) {
       
   354     __ srl (G1,  0, O1);
       
   355     __ srlx(G1, 32, O0);
       
   356   }
       
   357 #endif // !_LP64 && COMPILER2
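For reference, a worked example of the split the deleted block performs (the value is hypothetical, chosen only to make the two halves visible); a minimal C equivalent of the two shifts:

    #include <cstdint>
    #include <cstdio>
    int main() {
      uint64_t g1 = 0x1122334455667788ULL;   // long result as C2 leaves it in G1
      uint32_t o1 = (uint32_t)(g1 >>  0);    // srl  G1,  0, O1  -> low  word 0x55667788
      uint32_t o0 = (uint32_t)(g1 >> 32);    // srlx G1, 32, O0  -> high word 0x11223344
      printf("O0=%08x O1=%08x\n", o0, o1);
      return 0;
    }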
       
   358 
   294 
   359   // The callee returns with the stack possibly adjusted by adapter transition
   295   // The callee returns with the stack possibly adjusted by adapter transition
   360   // We remove that possible adjustment here.
   296   // We remove that possible adjustment here.
   361   // All interpreter local registers are untouched. Any result is passed back
   297   // All interpreter local registers are untouched. Any result is passed back
   362   // in the O0/O1 or float registers. Before continuing, the arguments must be
   298   // in the O0/O1 or float registers. Before continuing, the arguments must be
   440     case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
   376     case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
   441     case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
   377     case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
   442     case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
   378     case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
   443     case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
   379     case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
   444     case T_LONG   :
   380     case T_LONG   :
   445 #ifndef _LP64
       
   446                     __ mov(O1, Itos_l2);  // move other half of long
       
   447 #endif              // ifdef or no ifdef, fall through to the T_INT case
       
   448     case T_INT    : __ mov(O0, Itos_i);                         break;
   381     case T_INT    : __ mov(O0, Itos_i);                         break;
   449     case T_VOID   : /* nothing to do */                         break;
   382     case T_VOID   : /* nothing to do */                         break;
   450     case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
   383     case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
   451     case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
   384     case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
   452     case T_OBJECT :
   385     case T_OBJECT :
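A note on the two less obvious conversions above. For T_BOOLEAN, subcc(G0, O0, G0) computes 0 - O0, discards the result, and sets the carry (borrow) flag exactly when O0 is non-zero; addc(G0, 0, Itos_i) then yields 0 + 0 + carry, so Itos_i becomes 1 for any non-zero O0 and 0 otherwise (e.g. O0 = 5: borrow, Itos_i = 1; O0 = 0: no borrow, Itos_i = 0), which is the "!0 => true; 0 => false" normalization the comment names. For T_CHAR, the sll 16 / srl 16 pair zero-extends the low 16 bits; as the comment says, and3 with 0xFFFF is not an option because SPARC arithmetic immediates are only 13 bits (signed), so 0xFFFF does not fit.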
   882   Register mirror = LcpoolCache;
   815   Register mirror = LcpoolCache;
   883   __ load_mirror(mirror, Lmethod);
   816   __ load_mirror(mirror, Lmethod);
   884   __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
   817   __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
   885   __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
   818   __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
   886   __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
   819   __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
   887 #ifdef _LP64
       
   888   __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
   820   __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
   889 #endif
       
   890   __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp
   821   __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp
   891 
   822 
   892   // setup interpreter activation registers
   823   // setup interpreter activation registers
   893   __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals
   824   __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals
   894 
   825 
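A rough reading of the frame arithmetic above, assuming the usual 64-bit SPARC values (STACK_BIAS = 2047, BytesPerWord = 8; both are assumptions of this sketch, not shown in this hunk):

    Lmonitors = (FP + 2047) - rounded_vm_local_words * 8   // just below the unbiased FP, past the VM-local slots
    Lesp      = Lmonitors - 8                               // expression stack begins one word below the monitor area
    Llocals   = Gargs - 8                                    // locals addressed one word below the incoming arguments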
  1479   __ reset_last_Java_frame();
  1410   __ reset_last_Java_frame();
  1480 
  1411 
  1481   // Move the result handler address
  1412   // Move the result handler address
  1482   __ mov(Lscratch, G3_scratch);
  1413   __ mov(Lscratch, G3_scratch);
  1483   // return possible result to the outer frame
  1414   // return possible result to the outer frame
  1484 #ifndef __LP64
       
  1485   __ mov(O0, I0);
       
  1486   __ restore(O1, G0, O1);
       
  1487 #else
       
  1488   __ restore(O0, G0, O0);
  1415   __ restore(O0, G0, O0);
  1489 #endif /* __LP64 */
       
  1490 
  1416 
  1491   // Move result handler to expected register
  1417   // Move result handler to expected register
  1492   __ mov(G3_scratch, Lscratch);
  1418   __ mov(G3_scratch, Lscratch);
  1493 
  1419 
  1494   // Back in normal (native) interpreter frame. State is thread_in_native_trans
  1420   // Back in normal (native) interpreter frame. State is thread_in_native_trans
  1564     __ unlock_object(O1);
  1490     __ unlock_object(O1);
  1565 
  1491 
  1566     restore_native_result();
  1492     restore_native_result();
  1567   }
  1493   }
  1568 
  1494 
  1569 #if defined(COMPILER2) && !defined(_LP64)
       
  1570 
       
  1571   // C2 expects long results in G1 we can't tell if we're returning to interpreted
       
  1572   // or compiled so just be safe.
       
  1573 
       
  1574   __ sllx(O0, 32, G1);          // Shift bits into high G1
       
  1575   __ srl (O1, 0, O1);           // Zero extend O1
       
  1576   __ or3 (O1, G1, G1);          // OR 64 bits into G1
       
  1577 
       
  1578 #endif /* COMPILER2 && !_LP64 */
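This deleted block is the inverse of the 32-bit split shown earlier: sllx(O0, 32, G1) moves the high word into the upper half of G1, srl(O1, 0, O1) zero-extends the low word, and or3(O1, G1, G1) merges them, so with the earlier example values G1 ends up as 0x1122334455667788 again.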
       
  1579 
       
  1580   // dispose of return address and remove activation
  1495   // dispose of return address and remove activation
  1581 #ifdef ASSERT
  1496 #ifdef ASSERT
  1582   {
  1497   {
  1583     Label ok;
  1498     Label ok;
  1584     __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
  1499     __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);