src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
changeset 51618 54b344d9dd4e
parent 48717 7a5835a47adf
child 58554 8c3c39710a08
comparing 51617:9720ad0a40b6 with 51618:54b344d9dd4e
--- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
@@ -37,58 +37,57 @@
 #include "opto/runtime.hpp"
 #endif
 
 #define __ masm->
 
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) // nothing
-#else
-#define BLOCK_COMMENT(str) __ block_comment(str)
-#endif
-#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
-
 #ifndef PRODUCT
 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
 #endif
 
-// Used by compiler only; may use only caller saved, non-argument
-// registers.
+// Used by compiler only; may use only caller saved, non-argument registers.
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
-  // PPC port: use fixed size.
-  const int code_length = VtableStub::pd_code_size_limit(true);
-  VtableStub* s = new (code_length) VtableStub(true, vtable_index);
-
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(true);
+  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), code_length);
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 8; // just a two-instruction safety net
+  int       slop_delta = 0;
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
+    start_pc = __ pc();
+    int load_const_maxLen = 5*BytesPerInstWord;  // load_const generates 5 instructions; assume that as the max size for load_const_optimized.
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
+    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     __ lwz(R12_scratch2, offs, R11_scratch1);
     __ addi(R12_scratch2, R12_scratch2, 1);
     __ stw(R12_scratch2, offs, R11_scratch1);
   }
 #endif
 
   assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");
 
+  const Register rcvr_klass = R11_scratch1;
+  address npe_addr = __ pc(); // npe = null pointer exception
+  // Check if we must do an explicit check (implicit checks disabled, offset too large).
+  __ null_check(R3, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
   // Get receiver klass.
-  const Register rcvr_klass = R11_scratch1;
-
-  // We might implicit NULL fault here.
-  address npe_addr = __ pc(); // npe = null pointer exception
-  __ null_check(R3, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
   __ load_klass(rcvr_klass, R3);
-
- // Set method (in case of interpreted method), and destination address.
-  int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
 
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
     // Check offset vs vtable length.
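The slop accounting added in this hunk is the heart of the change: every variable-size instruction sequence is measured against its worst-case estimate, and the unused bytes are banked so later stubs sized from the same estimate cannot overflow their buffers. A minimal standalone sketch of that pattern, assuming a toy byte buffer rather than HotSpot's CodeBuffer (ToyBuffer and emit_with_slop are illustrative names, not HotSpot API):

#include <cassert>
#include <functional>
#include <vector>

// Toy stand-in for a code buffer: emitting appends bytes, pc() is the offset.
struct ToyBuffer {
  std::vector<unsigned char> code;
  int pc() const { return (int)code.size(); }
};

// Run a variable-size emitter, compare the bytes actually produced with the
// worst-case estimate, and bank the difference so later stubs sized from the
// same estimate still fit.
void emit_with_slop(ToyBuffer& buf, int worst_case_bytes, int& slop_bytes,
                    const std::function<void(ToyBuffer&)>& emit) {
  const int start_pc = buf.pc();
  emit(buf);
  const int slop_delta = worst_case_bytes - (buf.pc() - start_pc);
  assert(slop_delta >= 0 && "negative slop: adjust the code size estimate");
  slop_bytes += slop_delta;
}

For example, emitting a single 4-byte instruction against a 20-byte worst case banks 16 bytes of slop. That is exactly the situation in the stub above: load_const_optimized may emit anywhere from one to five instructions depending on the constant, so the five-instruction worst case is measured rather than assumed.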
@@ -100,11 +99,13 @@
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
     __ bind(L);
   }
 #endif
 
-  int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
+  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
+                     vtable_index*vtableEntry::size_in_bytes();
+  int v_off        = entry_offset + vtableEntry::method_offset_in_bytes();
 
   __ ld(R19_method, (RegisterOrConstant)v_off, rcvr_klass);
 
 #ifndef PRODUCT
   if (DebugVtables) {
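The rewritten offset computation makes the vtable layout explicit: the Method* for a given index lives at the vtable start offset, plus the index times the entry size, plus the method field's offset within the entry. A worked example with illustrative constants (the real values come from the build's Klass layout, not these numbers):

// Hypothetical values standing in for in_bytes(Klass::vtable_start_offset()),
// vtableEntry::size_in_bytes(), and vtableEntry::method_offset_in_bytes().
constexpr int vtable_start_offset    = 176;
constexpr int vtable_entry_size      = 8;   // one Method* per entry on 64-bit
constexpr int method_offset_in_entry = 0;

constexpr int v_off_for(int vtable_index) {
  return vtable_start_offset + vtable_index * vtable_entry_size + method_offset_in_entry;
}
static_assert(v_off_for(5) == 216, "Method* for index 5 sits 216 bytes into the Klass");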
@@ -114,44 +115,52 @@
     __ stop("Vtable entry is ZERO", 102);
     __ bind(L);
   }
 #endif
 
-  // If the vtable entry is null, the method is abstract.
   address ame_addr = __ pc(); // ame = abstract method error
+                              // if the vtable entry is null, the method is abstract
+                              // NOTE: for vtable dispatches, the vtable entry will never be null.
+
   __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), /*implicit only*/NULL);
   __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
   __ mtctr(R12_scratch2);
   __ bctr();
 
   masm->flush();
-
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-
-  s->set_exception_points(npe_addr, ame_addr);
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
 
   return s;
 }
 
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
-  // PPC port: use fixed size.
-  const int code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub* s = new (code_length) VtableStub(false, itable_index);
-
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(false);
+  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), code_length);
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 8; // just a two-instruction safety net
+  int       slop_delta = 0;
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
-  address start_pc;
+  int             load_const_maxLen = 5*BytesPerInstWord;  // load_const generates 5 instructions; assume that as the max size for load_const_optimized.
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
+    start_pc = __ pc();
     int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
+    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     __ lwz(R12_scratch2, offs, R11_scratch1);
     __ addi(R12_scratch2, R12_scratch2, 1);
     __ stw(R12_scratch2, offs, R11_scratch1);
   }
 #endif
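At the machine level the whole vtable stub is two dependent loads and an indirect branch: load the receiver's klass, load the Method* from the vtable slot, load the from_compiled entry point, and jump through CTR. A C-level analogue, under the simplifying assumption that the metadata is reachable as plain structs (the struct shapes and field names below are illustrative, not HotSpot's actual layout):

// Illustrative shapes only; HotSpot's Klass/Method layouts differ.
struct Method_ { void (*from_compiled)(void* receiver); };
struct Klass_  { Method_* vtable[1]; /* really vtable_length entries */ };
struct Oop_    { Klass_* klass; };

void vtable_dispatch(Oop_* receiver, int vtable_index) {
  Klass_*  k = receiver->klass;          // faults here for a null receiver (npe_addr)
  Method_* m = k->vtable[vtable_index];  // __ ld(R19_method, v_off, rcvr_klass)
  m->from_compiled(receiver);            // faults for an abstract method (ame_addr);
                                         // __ mtctr + __ bctr make this a tail jump
}

Note that the stub ends with bctr, not bctrl: it is a tail jump into the target method, so the original caller's link register is preserved and the callee returns directly to the caller.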
@@ -207,35 +216,24 @@
   // More detailed error message.
   // We force resolving of the call site by jumping to the "handle
   // wrong method" stub, and so let the interpreter runtime do all the
   // dirty work.
   __ bind(L_no_such_interface);
+  start_pc = __ pc();
   __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2);
+  slop_delta  = load_const_maxLen - (__ pc() - start_pc);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
   __ mtctr(R11_scratch1);
   __ bctr();
 
   masm->flush();
-
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-
-  s->set_exception_points(npe_addr, ame_addr);
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
+
   return s;
 }
 
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (DebugVtables || CountCompiledCalls || VerifyOops) {
-    return 1000;
-  }
-  int size = is_vtable_stub ? 20 + 8 : 164 + 20; // Plain + safety
-  if (UseCompressedClassPointers) {
-    size += MacroAssembler::instr_size_for_decode_klass_not_null();
-  }
-  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
-    size += is_vtable_stub ? 8 : 12;
-  }
-  return size;
-}
-
 int VtableStub::pd_code_alignment() {
+  // Power cache line size is 128 bytes, but we want to limit alignment loss.
   const unsigned int icache_line_size = 32;
   return icache_line_size;
 }
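pd_code_alignment rounds each stub to a 32-byte boundary: as the new comment notes, the Power cache line is 128 bytes, but aligning such small stubs to full lines would waste too much code cache. A sketch of the resulting padding arithmetic (align_up here is a local helper written for this note, not HotSpot's utility of the same name):

#include <cstdint>

constexpr uint32_t stub_alignment = 32; // pd_code_alignment() above

constexpr uint32_t align_up(uint32_t size, uint32_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
// A 57-byte stub is padded to 64 bytes, losing at most 31 bytes per stub.
static_assert(align_up(57, stub_alignment) == 64, "57 rounds up to 64");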