hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp
changeset 23491 f690330b10b9
parent 22910 88c3369b5967
child 24018 77b156916bab
23490:54fc219734a0 23491:f690330b10b9
@@ -25,10 +25,11 @@
 #ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
 #define CPU_X86_VM_MACROASSEMBLER_X86_HPP
 
 #include "asm/assembler.hpp"
 #include "utilities/macros.hpp"
+#include "runtime/rtmLocking.hpp"
 
 
 // MacroAssembler extends Assembler by frequently used macros.
 //
 // Instructions for which a 'better' code sequence exists depending
@@ -109,21 +110,22 @@
     unsigned char op = branch[0];
     assert(op == 0xE8 /* call */ ||
         op == 0xE9 /* jmp */ ||
         op == 0xEB /* short jmp */ ||
         (op & 0xF0) == 0x70 /* short jcc */ ||
-        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
+        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
+        op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
         "Invalid opcode at patch point");
 
     if (op == 0xEB || (op & 0xF0) == 0x70) {
       // short offset operators (jmp and jcc)
       char* disp = (char*) &branch[1];
       int imm8 = target - (address) &disp[1];
       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
       *disp = imm8;
     } else {
-      int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
+      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
       int imm32 = target - (address) &disp[1];
       *disp = imm32;
     }
   }
 
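Note on the hunk above: xbegin (the RTM transaction-begin instruction) is encoded as 0xC7 0xF8 followed by a rel32 displacement, so it can share the long-jcc patch path. In both encodings the displacement begins two bytes into the instruction and is measured from the first byte after the instruction. A minimal standalone sketch of that arithmetic (not HotSpot code; names are illustrative):

    #include <cstdint>

    // patch_rel32: write a new branch target into an already-emitted
    // instruction whose rel32 field begins at insn + disp_offset.
    //   long jcc : 0x0F 0x8x <rel32>  -> disp_offset == 2
    //   xbegin   : 0xC7 0xF8 <rel32>  -> disp_offset == 2
    //   call/jmp : 0xE8/0xE9 <rel32>  -> disp_offset == 1
    static void patch_rel32(uint8_t* insn, uint8_t* target, int disp_offset) {
      int32_t* disp = reinterpret_cast<int32_t*>(insn + disp_offset);
      // rel32 is relative to the end of the instruction, i.e. &disp[1].
      *disp = static_cast<int32_t>(target - reinterpret_cast<uint8_t*>(disp + 1));
    }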
@@ -158,11 +160,10 @@
   void incrementl(Address dst, int value = 1);
   void incrementl(Register reg, int value = 1);
 
   void incrementq(Register reg, int value = 1);
   void incrementq(Address dst, int value = 1);
-
 
   // Support optimal SSE move instructions.
   void movflt(XMMRegister dst, XMMRegister src) {
     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
     else                       { movss (dst, src); return; }
@@ -184,10 +185,12 @@
   }
   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 
   void incrementl(AddressLiteral dst);
   void incrementl(ArrayAddress dst);
+
+  void incrementq(AddressLiteral dst);
 
   // Alignment
   void align(int modulus);
 
   // A 5 byte nop that is safe for patching (see patch_verified_entry)
@@ -652,12 +655,40 @@
                            BiasedLockingCounters* counters = NULL);
   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 #ifdef COMPILER2
   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
   // See full description in macroAssembler_x86.cpp.
-  void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
-  void fast_unlock(Register obj, Register box, Register tmp);
+  void fast_lock(Register obj, Register box, Register tmp,
+                 Register scr, Register cx1, Register cx2,
+                 BiasedLockingCounters* counters,
+                 RTMLockingCounters* rtm_counters,
+                 RTMLockingCounters* stack_rtm_counters,
+                 Metadata* method_data,
+                 bool use_rtm, bool profile_rtm);
+  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
+#if INCLUDE_RTM_OPT
+  void rtm_counters_update(Register abort_status, Register rtm_counters);
+  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
+  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
+                                   RTMLockingCounters* rtm_counters,
+                                   Metadata* method_data);
+  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
+                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
+  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
+  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
+  void rtm_stack_locking(Register obj, Register tmp, Register scr,
+                         Register retry_on_abort_count,
+                         RTMLockingCounters* stack_rtm_counters,
+                         Metadata* method_data, bool profile_rtm,
+                         Label& DONE_LABEL, Label& IsInflated);
+  void rtm_inflated_locking(Register obj, Register box, Register tmp,
+                            Register scr, Register retry_on_busy_count,
+                            Register retry_on_abort_count,
+                            RTMLockingCounters* rtm_counters,
+                            Metadata* method_data, bool profile_rtm,
+                            Label& DONE_LABEL);
+#endif
 #endif
 
   Condition negate_condition(Condition cond);
 
   // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
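The widened fast_lock/fast_unlock signatures and the rtm_* helpers under INCLUDE_RTM_OPT implement lock elision with Intel RTM: attempt the critical section as a hardware transaction, retry a bounded number of times on abort or while the lock is busy (rtm_retry_lock_on_abort, rtm_retry_lock_on_busy), keep abort statistics (rtm_counters_update), and fall back to the CAS-based lock. A minimal sketch of that pattern using the compiler's RTM intrinsics rather than the raw assembler these helpers emit (the retry count and lock-word encoding here are illustrative, not HotSpot's):

    #include <immintrin.h>  // _xbegin/_xend/_xabort; compile with -mrtm

    static const int kRetryCount = 5;  // stand-in for an RTMRetryCount-style knob

    bool elided_lock_enter(volatile int* lock_word) {
      for (int i = 0; i < kRetryCount; i++) {
        unsigned status = _xbegin();
        if (status == _XBEGIN_STARTED) {
          if (*lock_word == 0)  // lock observed free; it is now in our read-set
            return true;        // run the critical section transactionally
          _xabort(0xff);        // lock is held: abort and retry or fall back
        }
        // Aborted: a real implementation updates abort counters here
        // (cf. rtm_counters_update) and may spin while the lock is busy.
      }
      return false;  // give up on elision; caller takes the CAS-based slow path
    }

    void elided_lock_exit(volatile int* lock_word, bool elided) {
      if (elided) _xend();         // commit: the lock word was never written
      else        *lock_word = 0;  // illustrative non-elided release
    }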
@@ -719,10 +750,11 @@
 
   void locked_cmpxchgptr(Register reg, AddressLiteral adr);
 
 
   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
+  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
 
 
   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
 
   void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
@@ -760,11 +792,18 @@
 
   // Helper functions for statistics gathering.
   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
   void cond_inc32(Condition cond, AddressLiteral counter_addr);
   // Unconditional atomic increment.
-  void atomic_incl(AddressLiteral counter_addr);
+  void atomic_incl(Address counter_addr);
+  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
+#ifdef _LP64
+  void atomic_incq(Address counter_addr);
+  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
+#endif
+  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
+  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
 
   void lea(Register dst, AddressLiteral adr);
   void lea(Address dst, AddressLiteral adr);
   void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
 
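The widened atomic_incl/atomic_incq family backs the statistics counters above: the Address overloads increment a location that is already addressable, while the AddressLiteral overloads take a scratch register (rscratch1 by default) because on x86_64 an arbitrary 64-bit literal may be out of rip-relative range and must first be materialized into a register. The counter bump itself is just a locked add; a semantics-only sketch in portable C++:

    #include <atomic>

    // What a "lock incl/incq [counter]" amounts to in portable terms.
    // (atomic_incptr picks the 32- or 64-bit form via LP64_ONLY/NOT_LP64.)
    std::atomic<long> g_counter{0};

    void bump_counter() {
      g_counter.fetch_add(1, std::memory_order_relaxed);  // statistics only
    }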
@@ -1072,11 +1111,15 @@
   // can this do an lea?
   void movptr(Register dst, ArrayAddress src);
 
   void movptr(Register dst, Address src);
 
-  void movptr(Register dst, AddressLiteral src);
+#ifdef _LP64
+  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
+#else
+  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
+#endif
 
   void movptr(Register dst, intptr_t src);
   void movptr(Register dst, Register src);
   void movptr(Address dst, intptr_t src);
 
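movptr(Register, AddressLiteral) grows a scratch parameter for the same reachability reason as the atomic increments: on 64-bit, a load from an absolute literal encodes directly only if the target lies within a signed 32-bit displacement of the next instruction; otherwise the address is first materialized into the scratch register. A standalone sketch of the reachability test (illustrative only; HotSpot's own check lives in the assembler):

    #include <cstdint>

    // True if `target` is reachable rip-relatively from the instruction
    // ending at `next_insn_pc`, i.e. the displacement fits in signed 32 bits.
    static bool fits_rip_relative(intptr_t target, intptr_t next_insn_pc) {
      intptr_t disp = target - next_insn_pc;
      return disp == static_cast<intptr_t>(static_cast<int32_t>(disp));
    }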