hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
changeset 23221 b70675ece1ce
parent 23211 954e3a81da29
child 24349 d8f40e5b392d
diff -r fc827339dc37 -r b70675ece1ce hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -49,14 +49,10 @@
 #include "utilities/debug.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif
 
-#ifndef CC_INTERP
-#error "CC_INTERP must be defined on PPC"
-#endif
-
 #define __ _masm->
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) // nothing
 #else
@@ -145,21 +141,23 @@
 
   // Reload method, it may have moved.
 #ifdef CC_INTERP
   __ ld(R19_method, state_(_method));
 #else
-  __ unimplemented("slow signature handler 1");
+  __ ld(R19_method, 0, target_sp);
+  __ ld(R19_method, _ijava_state_neg(method), R19_method);
 #endif
 
   // Get the result handler.
   __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
 
   // Reload method, it may have moved.
 #ifdef CC_INTERP
   __ ld(R19_method, state_(_method));
 #else
-  __ unimplemented("slow signature handler 2");
+  __ ld(R19_method, 0, target_sp);
+  __ ld(R19_method, _ijava_state_neg(method), R19_method);
 #endif
 
   {
     Label L;
     // test if static
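
Note on the new non-CC_INTERP path above: after the call into the VM the Method* may have been moved (hence the existing "it may have moved" comment), so it is reloaded from the interpreter frame rather than kept in a register. The first ld reads the back-chain word at offset 0 of target_sp (the PPC64 ABI keeps the caller's SP there), and the second indexes the template interpreter's state save area at a negative offset from that frame pointer. A minimal sketch of what a macro like _ijava_state_neg plausibly expands to, assuming a state struct laid out just below the frame; names and layout here are illustrative, not the real frame_ppc definitions:

  #include <cstddef>           // offsetof
  #include <cstdint>
  struct ijava_state {         // hypothetical layout, for illustration only
    uint64_t method;           // Method* of the frame's method
    uint64_t locals;           // ... further interpreter state ...
  };
  // Member offset, negative because the area sits below the frame pointer:
  #define _ijava_state_neg(member) \
    ((int)(-(int)sizeof(ijava_state) + (int)offsetof(ijava_state, member)))
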
@@ -451,11 +449,11 @@
   address entry = __ pc();
 
   //
   // Registers alive
   //   R16_thread     - JavaThread*
-  //   R19_method     - callee's methodOop (method to be invoked)
+  //   R19_method     - callee's method (method to be invoked)
   //   R1_SP          - SP prepared such that caller's outgoing args are near top
   //   LR             - return address to caller
   //
   // Stack layout at this point:
   //
@@ -489,11 +487,16 @@
 
 #ifdef CC_INTERP
   // Return to frame manager, it will handle the pending exception.
   __ blr();
 #else
-  Unimplemented();
+  // We don't know our caller, so jump to the general forward exception stub,
+  // which will also pop our full frame off. Satisfy the interface of
+  // SharedRuntime::generate_forward_exception()
+  __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
+  __ mtctr(R11_scratch1);
+  __ bctr();
 #endif
 
   return entry;
 }
 
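
Note on the dispatch pattern introduced above (my gloss, hedged): PPC64 has no branch instruction that reaches an arbitrary 64-bit immediate address, so the stub address is materialized into a scratch register, moved to the count register, and branched through; load_const_optimized is the MacroAssembler helper that emits the shortest materialization it can, with R0 as an extra temp. Fully spelled out, the unoptimized form is the following seven instructions; this is an illustrative sketch only, where hw3..hw0 stand for the four 16-bit halfwords of the target address, most significant first:

  __ lis(R11_scratch1, hw3);                     // bits 63..48 (sign-extended)
  __ ori(R11_scratch1, R11_scratch1, hw2);       // bits 47..32
  __ rldicr(R11_scratch1, R11_scratch1, 32, 31); // shift left by 32
  __ oris(R11_scratch1, R11_scratch1, hw1);      // bits 31..16
  __ ori(R11_scratch1, R11_scratch1, hw0);       // bits 15..0
  __ mtctr(R11_scratch1);                        // CTR := target address
  __ bctr();                                     // branch via CTR
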
@@ -500,10 +503,11 @@
 // Call an accessor method (assuming it is resolved, otherwise drop into
 // vanilla (slow path) entry.
 address InterpreterGenerator::generate_accessor_entry(void) {
-  if(!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods)))
+  if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
     return NULL;
+  }
 
   Label Lslow_path, Lacquire;
 
   const Register
          Rclass_or_obj = R3_ARG1,
@@ -584,14 +588,18 @@
   __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
 
   // Load from branch table and dispatch (volatile case: one instruction ahead)
   __ sldi(Rflags, Rflags, LogBytesPerWord);
   __ cmpwi(CCR6, Rscratch, 1); // volatile?
-  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
+  }
   __ ldx(Rbtable, Rbtable, Rflags);
 
-  __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+    __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
+  }
   __ mtctr(Rbtable);
   __ bctr();
 
 #ifdef ASSERT
   __ bind(LFlagInvalid);
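
Note on the new support_IRIW_for_not_multiple_copy_atomic_cpu guards (my gloss, hedged): PPC is not multiple-copy atomic, so to keep independent reads of independent writes (IRIW) consistent, a volatile field load must be preceded by a full barrier. Each branch-table slot therefore carries two entry points, the volatile one starting exactly one instruction (the fence) before the non-volatile one; when the field is volatile, Rscratch is 1 and the subf backs the dispatch target up by one instruction word. On platforms where the guard is false the two entry points coincide, so both adjustments can be skipped. Condensed shape of one slot, mirroring the itos block below:

  __ fence();                               // volatile entry point: full barrier
  branch_table[itos] = __ pc();             // non-volatile entry point, 4 bytes later
  __ lwax(R3_RET, Rclass_or_obj, Roffset);  // the field load itself
  __ beq(CCR6, Lacquire);                   // volatile? append acquire barrier
  __ blr();
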
@@ -603,66 +611,66 @@
     all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
     all_initialized   = all_initialized   && (branch_table[i] != NULL);
   }
   assert(all_uninitialized != all_initialized, "consistency"); // either or
 
-  __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+  __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
   if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
   if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
   if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
   __ stop("unexpected type", 0x6551);
 #endif
 
   if (branch_table[itos] == 0) { // generate only once
     __ align(32, 28, 28); // align load
-    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
     branch_table[itos] = __ pc(); // non-volatile_entry point
     __ lwax(R3_RET, Rclass_or_obj, Roffset);
     __ beq(CCR6, Lacquire);
     __ blr();
   }
 
   if (branch_table[ltos] == 0) { // generate only once
     __ align(32, 28, 28); // align load
-    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
     branch_table[ltos] = __ pc(); // non-volatile_entry point
     __ ldx(R3_RET, Rclass_or_obj, Roffset);
     __ beq(CCR6, Lacquire);
     __ blr();
   }
 
   if (branch_table[btos] == 0) { // generate only once
     __ align(32, 28, 28); // align load
-    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
     branch_table[btos] = __ pc(); // non-volatile_entry point
     __ lbzx(R3_RET, Rclass_or_obj, Roffset);
     __ extsb(R3_RET, R3_RET);
     __ beq(CCR6, Lacquire);
     __ blr();
   }
 
   if (branch_table[ctos] == 0) { // generate only once
     __ align(32, 28, 28); // align load
-    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
     branch_table[ctos] = __ pc(); // non-volatile_entry point
     __ lhzx(R3_RET, Rclass_or_obj, Roffset);
     __ beq(CCR6, Lacquire);
     __ blr();
   }
 
   if (branch_table[stos] == 0) { // generate only once
     __ align(32, 28, 28); // align load
-    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
     branch_table[stos] = __ pc(); // non-volatile_entry point
     __ lhax(R3_RET, Rclass_or_obj, Roffset);
     __ beq(CCR6, Lacquire);
     __ blr();
   }
 
   if (branch_table[atos] == 0) { // generate only once
     __ align(32, 28, 28); // align load
-    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
     branch_table[atos] = __ pc(); // non-volatile_entry point
     __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
     __ verify_oop(R3_RET);
     //__ dcbt(R3_RET); // prefetch
     __ beq(CCR6, Lacquire);
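
Note on the sync() -> fence() renaming repeated through the blocks above (my reading, hedged): this looks like a move to the MacroAssembler's portable barrier vocabulary rather than a change in barrier strength, since on PPC fence() is, to the best of my knowledge, a thin inline wrapper that still emits sync. Presumed shape of the wrappers, illustrative only; the real definitions live in macroAssembler_ppc.inline.hpp:

  inline void MacroAssembler::fence()   { sync();   } // full two-way barrier
  inline void MacroAssembler::acquire() { lwsync(); } // load-acquire barrier
  inline void MacroAssembler::release() { lwsync(); } // store-release barrier
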
@@ -681,14 +689,11 @@
     //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
   }
 #endif
 
   __ bind(Lslow_path);
-  assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
-  __ load_const_optimized(Rscratch, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
-  __ mtctr(Rscratch);
-  __ bctr();
+  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
   __ flush();
 
   return entry;
 }
 
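
Note on branch_to_entry (inferred from the four lines it replaces, so hedged): the removed sequence, asserting that the entry exists and then load_const_optimized, mtctr, bctr, is exactly the open-coded far-branch idiom, and the new helper presumably folds that pattern, assert included, into a single MacroAssembler call; the same substitution appears once more in the hunk below. A plausible sketch, not the real implementation:

  void MacroAssembler::branch_to_entry(address entry, Register tmp) {
    assert(entry != NULL, "Normal entry must have been generated by now");
    load_const_optimized(tmp, entry, R0); // materialize the target address
    mtctr(tmp);                           // move it to the count register
    bctr();                               // and branch through CTR
  }
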
@@ -771,14 +776,11 @@
 
     __ blr();
 
     // Generate regular method entry.
     __ bind(slow_path);
-    assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
-    __ load_const_optimized(R11_scratch1, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
-    __ mtctr(R11_scratch1);
-    __ bctr();
+    __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
     __ flush();
 
     return entry;
   } else {
     return generate_accessor_entry();