@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -125,17 +125,13 @@
   __ andr(R1, R1, ~JNIHandles::weak_tag_mask);
 #else
   __ bic(R1, R1, JNIHandles::weak_tag_mask);
 #endif
 
-  if (os::is_MP()) {
-    // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
-    __ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
-    __ ldr(Robj, Address(R1, Rtmp1));
-  } else {
-    __ ldr(Robj, Address(R1));
-  }
+  // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
+  __ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
+  __ ldr(Robj, Address(R1, Rtmp1));
 
 #ifdef AARCH64
   __ add(Robj, Robj, AsmOperand(R2, lsr, 2));
   Address field_addr = Address(Robj);
 #else
@@ -196,29 +192,25 @@
 #endif // __ABI_HARD__
   default:
     ShouldNotReachHere();
   }
 
-  if(os::is_MP()) {
-    // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
+  // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
 #if defined(__ABI_HARD__) && !defined(AARCH64)
-    if (type == T_FLOAT || type == T_DOUBLE) {
-      __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
-      __ fmrrd(Rres, Rres_hi, D0);
-      __ eor(Rtmp2, Rres, Rres);
-      __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
-    } else
+  if (type == T_FLOAT || type == T_DOUBLE) {
+    __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
+    __ fmrrd(Rres, Rres_hi, D0);
+    __ eor(Rtmp2, Rres, Rres);
+    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
+  } else
 #endif // __ABI_HARD__ && !AARCH64
-    {
+  {
 #ifndef AARCH64
-      __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
+    __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
 #endif // !AARCH64
-      __ eor(Rtmp2, Rres, Rres);
-      __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
-    }
-  } else {
-    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr));
-  }
+    __ eor(Rtmp2, Rres, Rres);
+    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
+  }
   __ cmp(Rsafept_cnt2, Rsafept_cnt);
 #ifdef AARCH64
   __ b(slow_case, ne);
   __ mov(R0, Rres);
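
Note (reviewer aside, not part of the patch): both hunks keep the idiom the in-code comment describes. `andr(Rtmp1, Rsafept_cnt, (unsigned)1)` and `eor(Rtmp2, Rres, Rres)` produce a register that is zero on the fast path but data-dependent on the preceding load; using it as the index register of the next load turns the required ordering into an address dependency, which ARM honors without an explicit LoadLoad barrier. The surrounding fast path (partly outside these hunks) is a seqlock-style read of the safepoint counter: read the counter, load the field, re-read the counter, and take slow_case if it changed. A minimal, portable sketch of that structure, assuming hypothetical names (counter, field, read_field_fast) and using acquire ordering where the generated code relies on the address dependency:

  #include <atomic>
  #include <cstdint>

  // Illustrative sketch only; not HotSpot code. The generated ARM code replaces the
  // explicit acquire ordering below with an address dependency, as the comment in
  // the patch notes.
  static std::atomic<uint32_t> counter{0};  // stands in for the safepoint counter
  static std::atomic<int32_t>  field{0};    // stands in for the field being read

  bool read_field_fast(int32_t* out) {
    uint32_t before = counter.load(std::memory_order_acquire);
    if (before & 1) {
      return false;                                       // safepoint in progress -> slow path
    }
    int32_t v = field.load(std::memory_order_relaxed);    // field load, ordered after 'before'
    std::atomic_thread_fence(std::memory_order_acquire);  // keep the re-read after the field load
    if (counter.load(std::memory_order_relaxed) != before) {
      return false;                                       // counter moved -> slow path
    }
    *out = v;
    return true;
  }

The `cmp(Rsafept_cnt2, Rsafept_cnt)` / `b(slow_case, ne)` at the end of the second hunk corresponds to the final counter comparison above.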