hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
changeset 21198 dd647e8d1d72
parent 21095 1a04f7b3946e
child 23844 0c29a324ae14
comparing 21101:02624d68a712 with 21198:dd647e8d1d72
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
@@ -148,38 +148,37 @@
   __ dispatch_next(state);
   return entry;
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
-  TosState incoming_state = state;
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 
 #ifdef COMPILER2
   // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
-  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
     for (int i = 1; i < 8; i++) {
         __ ffree(i);
     }
   } else if (UseSSE < 2) {
     __ empty_FPU_stack();
   }
 #endif
-  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
     __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
   } else {
     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
   }
 
   // In SSE mode, interpreter returns FP results in xmm0 but they need
   // to end up back on the FPU so it can operate on them.
-  if (incoming_state == ftos && UseSSE >= 1) {
+  if (state == ftos && UseSSE >= 1) {
     __ subptr(rsp, wordSize);
     __ movflt(Address(rsp, 0), xmm0);
     __ fld_s(Address(rsp, 0));
     __ addptr(rsp, wordSize);
-  } else if (incoming_state == dtos && UseSSE >= 2) {
+  } else if (state == dtos && UseSSE >= 2) {
     __ subptr(rsp, 2*wordSize);
     __ movdbl(Address(rsp, 0), xmm0);
     __ fld_d(Address(rsp, 0));
     __ addptr(rsp, 2*wordSize);
   }
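The hunk above only renames incoming_state back to state; the surrounding FPU handling is unchanged. As a reading aid, here is a small standalone sketch of the conditions that prologue branches on. It is not HotSpot code: the enum subset and the helper names needs_x87_cleanup and result_arrives_in_xmm0 are invented, and only the UseSSE thresholds and the ftos/dtos cases are taken from the code above. (The diff continues below.)

// Illustrative sketch only (not HotSpot source): models the UseSSE decisions
// made by the prologue of generate_return_entry_for.
#include <cstdio>

enum TosState { ftos, dtos };   // subset, for illustration

// With UseSSE < 1 (floats) or UseSSE < 2 (doubles) the result is already on
// the x87 stack, so st(1)..st(7) are freed to leave only st(0).
static bool needs_x87_cleanup(TosState state, int UseSSE) {
  return (state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2);
}

// Otherwise the value comes back in xmm0 and has to be bounced through memory
// (movflt/movdbl, then fld_s/fld_d) to reach the x87 stack, since there is no
// direct xmm-to-x87 move.
static bool result_arrives_in_xmm0(TosState state, int UseSSE) {
  return (state == ftos && UseSSE >= 1) || (state == dtos && UseSSE >= 2);
}

int main() {
  for (int sse = 0; sse <= 2; ++sse) {
    std::printf("UseSSE=%d  ftos: cleanup=%d xmm0=%d   dtos: cleanup=%d xmm0=%d\n",
                sse,
                needs_x87_cleanup(ftos, sse), result_arrives_in_xmm0(ftos, sse),
                needs_x87_cleanup(dtos, sse), result_arrives_in_xmm0(dtos, sse));
  }
  return 0;
}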
@@ -192,36 +191,25 @@
   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
 
   __ restore_bcp();
   __ restore_locals();
 
-  if (incoming_state == atos) {
+  if (state == atos) {
     Register mdp = rbx;
     Register tmp = rcx;
     __ profile_return_type(mdp, rax, tmp);
   }
 
-  Label L_got_cache, L_giant_index;
-  if (EnableInvokeDynamic) {
-    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
-    __ jcc(Assembler::equal, L_giant_index);
-  }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
-  __ bind(L_got_cache);
-  __ movl(rbx, Address(rbx, rcx,
-                    Address::times_ptr, ConstantPoolCache::base_offset() +
-                    ConstantPoolCacheEntry::flags_offset()));
-  __ andptr(rbx, 0xFF);
-  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
   __ dispatch_next(state, step);
-
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
-    __ jmp(L_got_cache);
-  }
 
   return entry;
 }
 
 
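The substance of the change is in this second hunk: the runtime EnableInvokeDynamic / L_giant_index check is gone, the caller now passes index_size, and the parameter count is extracted with ConstantPoolCacheEntry::parameter_size_mask instead of a bare andptr(rbx, 0xFF). The sketch below restates that logic in plain C++ and is illustrative only: select_index_size and pop_callee_parameters are invented names, the 0xFF mask value is taken from the old code's andptr and assumed to equal parameter_size_mask, and the idea that the caller now picks the index size per bytecode (rather than the entry testing the bytecode at runtime) is inferred from the removed branch.

// Illustrative sketch only (not HotSpot source): the caller-side choice of
// index_size and the stack adjustment performed before dispatch_next.
#include <cstddef>
#include <cstdint>

typedef uint16_t u2;
typedef uint32_t u4;

// invokedynamic uses a 4-byte constant pool cache index, all other invokes a
// 2-byte one; presumably the caller of generate_return_entry_for now makes
// this choice when the return entry is generated, instead of the removed
// cmpb/jcc on the bytecode doing it on every return.
size_t select_index_size(bool is_invokedynamic) {
  return is_invokedynamic ? sizeof(u4) : sizeof(u2);
}

// The low bits of the cache entry's flags word hold the callee's parameter
// size in stack slots (mask assumed to be 0xFF, matching the old
// andptr(rbx, 0xFF)).  The generated lea pops that many slots off the
// expression stack; on x86_32 one slot is one word and the stack grows down,
// so popping means moving rsp up.
const uint32_t parameter_size_mask_value = 0xFF;

intptr_t* pop_callee_parameters(intptr_t* sp, uint32_t flags) {
  uint32_t parameter_size = flags & parameter_size_mask_value;
  return sp + parameter_size;
}

If that reading is right, the net effect is that the invokedynamic special case moves from a per-return runtime test in the generated code to a generation-time parameter, so separate return entries can be emitted for the two index sizes.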