@@ -1904,13 +1904,11 @@
     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
     Register addr = op->addr()->as_register();
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
 
   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
@@ -1926,28 +1924,22 @@
 #ifdef _LP64
       if (UseCompressedOops) {
         __ encode_heap_oop(cmpval);
         __ mov(rscratch1, newval);
         __ encode_heap_oop(rscratch1);
-        if (os::is_MP()) {
-          __ lock();
-        }
+        __ lock();
         // cmpval (rax) is implicitly used by this instruction
         __ cmpxchgl(rscratch1, Address(addr, 0));
       } else
 #endif
       {
-        if (os::is_MP()) {
-          __ lock();
-        }
+        __ lock();
         __ cmpxchgptr(newval, Address(addr, 0));
       }
     } else {
       assert(op->code() == lir_cas_int, "lir_cas_int expected");
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ cmpxchgl(newval, Address(addr, 0));
     }
 #ifdef _LP64
   } else if (op->code() == lir_cas_long) {
     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
@@ -1956,13 +1948,11 @@
     assert(cmpval == rax, "wrong register");
     assert(newval != NULL, "new val must be register");
     assert(cmpval != newval, "cmp and new values must be in different registers");
     assert(cmpval != addr, "cmp and addr must be in different registers");
     assert(newval != addr, "new value and addr must be in different registers");
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     __ cmpxchgq(newval, Address(addr, 0));
 #endif // _LP64
   } else {
     Unimplemented();
   }
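Every hunk in emit_compare_and_swap above makes the same change: the `if (os::is_MP()) { __ lock(); }` guard becomes an unconditional `__ lock()`, since HotSpot now always assumes a multiprocessor and the lock prefix is harmless on a single CPU. As a minimal sketch of what the emitted `lock cmpxchg` sequence does -- illustrative only, using std::atomic rather than HotSpot's assembler:

#include <atomic>
#include <cstdint>
#include <iostream>

// Illustrative only: on x86, compare_exchange_strong compiles to the same
// `lock cmpxchg` the assembler emits above. The variable names are ours.
int main() {
  std::atomic<int64_t> field{42};
  int64_t expected = 42;            // cmp_value (cmpxchg implicitly uses rax)
  int64_t desired  = 99;            // new_value
  // On success `desired` is stored; on failure `expected` is overwritten
  // with the value actually found in memory.
  bool ok = field.compare_exchange_strong(expected, desired);
  std::cout << "swapped=" << ok << " value=" << field.load() << "\n";
  return 0;
}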
@@ -2802,43 +2792,40 @@
 }
 }
 
 
 void LIR_Assembler::align_call(LIR_Code code) {
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    int offset = __ offset();
-    switch (code) {
-      case lir_static_call:
-      case lir_optvirtual_call:
-      case lir_dynamic_call:
-        offset += NativeCall::displacement_offset;
-        break;
-      case lir_icvirtual_call:
-        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
-        break;
-      case lir_virtual_call:  // currently, sparc-specific for niagara
-      default: ShouldNotReachHere();
-    }
-    __ align(BytesPerWord, offset);
-  }
+  // make sure that the displacement word of the call ends up word aligned
+  int offset = __ offset();
+  switch (code) {
+    case lir_static_call:
+    case lir_optvirtual_call:
+    case lir_dynamic_call:
+      offset += NativeCall::displacement_offset;
+      break;
+    case lir_icvirtual_call:
+      offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
+      break;
+    case lir_virtual_call:  // currently, sparc-specific for niagara
+    default: ShouldNotReachHere();
+  }
+  __ align(BytesPerWord, offset);
 }
 
 
 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
-  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
+  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
   __ call(AddressLiteral(op->addr(), rtype));
   add_call_info(code_offset(), op->info());
 }
 
 
 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   __ ic_call(op->addr());
   add_call_info(code_offset(), op->info());
-  assert(!os::is_MP() ||
-         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
+  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
 }
 
 
 /* Currently, vtable-dispatch is only enabled for sparc platforms */
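align_call, call and ic_call keep their alignment logic but drop the os::is_MP() test: the 32-bit call displacement is patched at runtime, and keeping it word-aligned lets that patch happen atomically. A sketch of the padding arithmetic, assuming the x86 values NativeCall::displacement_offset == 1 and an 8-byte word -- a hypothetical helper, not HotSpot code:

#include <cstdio>

// Hypothetical helper mirroring what __ align(modulus, target) achieves:
// pad with nops until `target` falls on a multiple of `modulus`.
static int padding_for(int offset, int displacement_offset, int bytes_per_word) {
  int target = offset + displacement_offset;
  return (bytes_per_word - (target % bytes_per_word)) % bytes_per_word;
}

int main() {
  const int displacement_offset = 1;  // call opcode is 1 byte on x86 (assumed)
  const int bytes_per_word = 8;       // 64-bit word
  for (int off = 0; off < 8; off++) {
    printf("code offset %d -> %d nop(s) before the call\n",
           off, padding_for(off, displacement_offset, bytes_per_word));
  }
  return 0;
}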
@@ -2854,18 +2841,17 @@
     bailout("static call stub overflow");
     return;
   }
 
   int start = __ offset();
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
-  }
+
+  // make sure that the displacement word of the call ends up word aligned
+  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
   __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
   __ mov_metadata(rbx, (Metadata*)NULL);
   // must be set to -1 at code generation time
-  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
+  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
   __ jump(RuntimeAddress(__ pc()));
 
   if (UseAOT) {
     // Trampoline to aot code
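The static call stub hunk follows the same pattern: the align() for the stub's jump displacement is now unconditional, and the assert no longer special-cases uniprocessors. The arithmetic can be checked in isolation -- a hedged sketch assuming a 10-byte 64-bit mov-immediate (NativeMovConstReg::instruction_size) and a jump displacement that starts 1 byte into the instruction:

#include <cstdio>

// Hedged sketch of the alignment arithmetic in the static call stub.
// Assumed illustrative sizes: mov_size = 10 (movq reg, imm64) and
// disp_off = 1 (jmp opcode byte precedes its rel32 displacement).
int main() {
  const int mov_size = 10, disp_off = 1, word = 8;
  for (int start = 0; start < 8; start++) {
    // align(word, start + mov_size + disp_off) pads so the jmp's
    // displacement word lands on a word boundary
    int target = start + mov_size + disp_off;
    int pad = (word - target % word) % word;
    int after_mov = start + pad + mov_size;   // where the assert fires
    printf("stub at %d: pad %d, disp at %d (word-aligned: %s)\n",
           start, pad, after_mov + disp_off,
           ((after_mov + 1) % word == 0) ? "yes" : "no");
  }
  return 0;
}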
@@ -3990,13 +3976,11 @@
 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
   assert(data == dest, "xchg/xadd uses only 2 operands");
 
   if (data->type() == T_INT) {
     if (code == lir_xadd) {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
     } else {
       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
     }
   } else if (data->is_oop()) {
@@ -4015,13 +3999,11 @@
 #endif
   } else if (data->type() == T_LONG) {
 #ifdef _LP64
     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
     if (code == lir_xadd) {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
     } else {
       __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
     }
 #else
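Note that only the lir_xadd branches carry a lock prefix: an x86 xchg with a memory operand asserts the bus lock implicitly, so the xchgl/xchgq paths never needed `__ lock()` in the first place. Illustrative only, via std::atomic:

#include <atomic>
#include <cstdint>
#include <iostream>

// On x86, fetch_add compiles to `lock xadd` (explicit prefix, as emitted
// above), while exchange compiles to plain `xchg` -- the lock is implied
// whenever xchg touches memory, so no prefix is emitted for it.
int main() {
  std::atomic<int64_t> counter{0};
  int64_t old_add  = counter.fetch_add(5);   // lock xaddq
  int64_t old_swap = counter.exchange(100);  // xchgq (implicitly locked)
  std::cout << old_add << " " << old_swap << " " << counter.load() << "\n";
  return 0;
}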