hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

#ifndef CC_INTERP

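// Frequently used byte offsets from rbp into the current interpreter
// frame (the slot indices defined in frame_x86.hpp are negative because
// the frame grows toward lower addresses).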
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register rbx
  __ lea(c_rarg1, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // kind of lame ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(c_rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

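  // The low bits of the cache entry's flags word hold the invoked
  // method's parameter size in stack slots; they are used here to pop
  // the callee's arguments off the expression stack.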
  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
  if (UseJVMCICompiler) {
    Label L;
    __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ movzwl(rax, rax);       break;
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// ecx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment the counters either in the MethodCounters* or in the MDO, depending on whether we're profiling.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
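    // increment_mask_and_jump bumps the counter, masks the new value with
    // the per-method notification mask, and branches to 'overflow' when
    // the masked value is zero, i.e. when a notification threshold is
    // crossed.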
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-NULL only for interpreted methods, so
    // (profile_method != NULL) implies !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // r14 - locals
  // r13 - bcp
  // rbx - method
  // edx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ movl(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are always pushed as well (this wasn't
// obvious in generate_fixed_frame), so the guard should cover them
// too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(r15_thread, Thread::stack_base_offset());
  const Address stack_size(r15_thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize);  // convert slot count to bytes
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

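  // rax = frame size plus the thread's lowest stack address (base - size);
  // rsp must stay above this value plus the guard area added below.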
  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check);

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, r13);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax,
                           ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

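  // A monitor slot is a BasicObjectLock: a BasicLock (the displaced
  // header word) followed by the locked object reference.  It is
  // allocated by simply bumping rsp down by entry_size.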
  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);  // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ movptr(c_rarg1, rsp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14: pointer to locals
//      r13: sender sp
//      rdx: cp cache
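//
// The pushes below build the following fixed frame, from high to low
// address:
//      return address
//      saved rbp                     <- rbp
//      sender sp
//      last_sp (NULL)
//      Method*
//      mdp (or 0)
//      constant pool cache
//      locals pointer
//      bcp (or 0 for native calls)
//      expression stack bottom slot  <- rsp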
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);        // save return address
  __ enter();          // save old & set new rbp
  __ push(r13);        // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(r13, Address(rbx, Method::const_offset()));      // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);        // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);      // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx); // set constant pool cache
  __ push(r14); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(r13); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            r15_thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path
    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;  // crc
    const Register val = c_rarg0;  // source java byte value
    const Register tbl = c_rarg1;  // scratch

    // Arguments are reversed on java expression stack
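    // With the return address at [rsp], the incoming stack is:
    //   [rsp + 2*wordSize]  int crc
    //   [rsp +   wordSize]  int b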
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register off = len;      // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
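    // Stack slots above the return address (a long occupies two slots):
    //   updateBytes:      crc [4], byte[] b [3], off [2], len [1]
    //   updateByteBuffer: crc [5], long buf [3-4], off [2], len [1]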
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off); // + offset
      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off); // + offset
      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ movl(len, Address(rsp, wordSize)); // Length

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
 */
address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32CIntrinsics) {
    address entry = __ pc();
    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;
    const Register off = c_rarg3;  // offset
    const Register end = len;

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ movptr(buf, Address(rsp, 3 * wordSize)); // long buf
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off); // + offset
      __ movl(crc, Address(rsp, 5 * wordSize)); // Initial CRC
      // Note on 5 * wordSize vs. 4 * wordSize:
      // *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
      //                                                   4         2,3          1        0
      // end starts at SP + 8
      // The Java(R) Virtual Machine Specification Java SE 7 Edition
      // 4.10.2.3. Values of Types long and double
      //    "When calculating operand stack length, values of type long and double have length two."
    } else {
      __ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off); // + offset
      __ movl(crc, Address(rsp, 4 * wordSize)); // Initial CRC
    }
    __ movl(end, Address(rsp, wordSize)); // end
    __ subl(end, off); // end - off
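    // 'end' is aliased to 'len' (c_rarg2), so the subtraction leaves the
    // byte count in the stub's length argument register.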
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
    // result in rax
    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    return entry;
  }

  return NULL;
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // r13: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack, the arguments are already on the stack, and we
  // only add a handful of words to it.

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender sp
  __ pop(rax);                                       // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters (r14)
  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
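  // (Java arguments were pushed in declaration order, so parameter 0 is
  //  the highest slot; with the return address popped it lives at
  //  rsp + (rcx - 1) * wordSize, which is where r14 must point.)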

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // At this point in the method invocation the exception handler would
  // try to exit the monitor of a synchronized method that has not been
  // entered yet, so we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. remove_activation will
  // check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register t      = r11;

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
          "adjust this code");

  // The generated handlers do not touch RBX (the Method*).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method);        // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
            t);
    // pass handle to mirror
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ movptr(rscratch2, unsatisfied.addr());
    __ cmpptr(rax, rscratch2);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

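  // _thread_in_native_trans makes this thread visible to the safepoint
  // protocol; the fence (or serialization page write) below orders this
  // state store before the safepoint-state load that follows.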
  1124   if (os::is_MP()) {
       
  1125     if (UseMembar) {
       
  1126       // Force this write out before the read below
       
  1127       __ membar(Assembler::Membar_mask_bits(
       
  1128            Assembler::LoadLoad | Assembler::LoadStore |
       
  1129            Assembler::StoreLoad | Assembler::StoreStore));
       
  1130     } else {
       
  1131       // Write serialization page so VM thread can do a pseudo remote membar.
       
  1132       // We use the current thread pointer to calculate a thread specific
       
  1133       // offset to write to within the page. This minimizes bus traffic
       
  1134       // due to cache line collision.
       
  1135       __ serialize_memory(r15_thread, rscratch2);
       
  1136     }
       
  1137   }
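
  // For illustration, the serialization page write performed by
  // serialize_memory() boils down to roughly the following C++ (a sketch
  // of os::write_memory_serialize_page(); the shift and mask values are
  // implementation details):
  //
  //   uintptr_t off = ((uintptr_t) thread >> shift) & page_mask;
  //   *(volatile int32_t*) (_mem_serialize_page + off) = 1;
  //
  // The VM thread later write-protects that page, so any thread still in
  // the middle of such a store takes a trap -- the "pseudo remote membar".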
       
  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here, preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either, as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
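
  // Conceptually (a sketch): thread->active_handles()->top = 0, i.e. all
  // JNI local references created during the native call are abandoned in
  // one step; the handle block itself is kept around for reuse.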
       
  // If the result is an oop, unbox it and store it in the frame where GC
  // will see it; the result handler will pick it up from there

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }
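
  // In C++ terms the block above is roughly (names illustrative only):
  //
  //   if (result_handler == AbstractInterpreter::result_handler(T_OBJECT)) {
  //     jobject h = (jobject) rax;                     // JNI handle or NULL
  //     oop r = (h == NULL) ? (oop) NULL : *(oop*) h;  // unbox the handle
  //     *oop_temp_slot = r;                            // GC-visible slot
  //   }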
       
  {
    Label no_reguard;
    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();

    __ bind(no_reguard);
  }
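
  // Context: the yellow guard zone is unprotected while a StackOverflowError
  // is being thrown; reguard_yellow_pages() re-arms it here so that a later
  // overflow in this thread is caught again.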
       
  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
  // r13 == code_base()
  __ movptr(r13, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset()));    // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - sizeof(BasicObjectLock)));

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(c_rarg1, monitor); // address of first monitor

      __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rax, call result handler to
  // restore potential result in xmm0 and handle result

  __ pop(ltos);
  __ pop(dtos);

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // r13: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
//   __ incrementl(rdx);
//   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (r14)
  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
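
  // i.e. r14 = rsp + size_of_parameters * wordSize - wordSize: Java local 0
  // (the first parameter) sits at the highest address and further locals
  // follow at lower addresses.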
       
  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx); // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }
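
  // Pseudo-C for the loop above, for illustration:
  //
  //   for (int i = additional_locals; i > 0; i--) {
  //     push(NULL_WORD);  // non-parameter locals start out zeroed
  //   }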
       
  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages eagerly, so that a stack overflow is
  // triggered here rather than at some unpredictable point later
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}


// These should never be compiled: for these methods the interpreter's
// intrinsic entry is preferred to the compiled version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     :
      return false;
    default:
      return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp thru expr stack
  // bottom).  Be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}
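
// Worked example (numbers for illustration only): on x86_64 each stack
// element occupies one word (Interpreter::stackElementWords == 1), so a
// method with max_locals == 3 and max_stack == 5 contributes
// method_stack == 8 words on top of the fixed overhead_size and stub_code.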
       
//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13 points to call/send
  __ restore_locals();
  __ reinit_heapbase();  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13: exception bcp
  __ verify_oop(rax);
  __ mov(c_rarg1, rax);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still that of the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // r13: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ subptr(r14, rax);
    __ addptr(r14, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          r15_thread, rax, r14);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
  __ reset_last_Java_frame(true, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();  // XXX do we need this?
  __ restore_locals(); // XXX do we need this?
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = r14;

    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax); // restore the member name argument in local 0
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ get_vm_result(rax, r15_thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: rbp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        r15_thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation
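//
// Each non-vtos entry below flushes the TOS-cached value onto the expression
// stack and then falls through to the common vtos entry point. bep, cep and
// sep can share the iep entry because byte, char and short results are
// already int-sized on the expression stack.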
       
void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();   __ jmp(L);
  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
  lep = __ pc();  __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}


//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);                                   // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
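  // The pair index keeps the previous bytecode in the low bits and the
  // current bytecode in the high bits, i.e. roughly:
  //
  //   _index = (_index >> log2_number_of_codes) |
  //            (t->bytecode() << log2_number_of_codes);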
       
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // ! CC_INTERP