/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"

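// ZBarrier::self_heal() below installs heal_addr over addr at *p with a single
// CAS, retrying only when a racing barrier stored an address with weaker
// (remapped or finalizable) metadata bits, so the field is never left weaker
// than what this barrier computed.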
|
inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
  if (heal_addr == 0) {
    // Never heal with null since it interacts badly with reference processing.
    // A mutator clearing an oop would be similar to calling Reference.clear(),
    // which would make the reference non-discoverable or silently dropped
    // by the reference processor.
    return;
  }

  for (;;) {
    if (addr == heal_addr) {
      // Already healed
      return;
    }

    // Heal
    const uintptr_t prev_addr = Atomic::cmpxchg(heal_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr == addr) {
      // Success
      return;
    }

    if (ZAddress::is_good_or_null(prev_addr)) {
      // No need to heal
      return;
    }

    // The oop location was healed by another barrier, but it is still not
    // good or null. Re-apply healing to make sure the oop is not left with
    // weaker (remapped or finalizable) metadata bits than what this barrier
    // tried to apply.
    assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
    addr = prev_addr;
  }
}

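// Generic barrier: fast_path decides whether addr is already acceptable for
// this barrier flavor, otherwise slow_path produces the good address, which is
// then self healed back into the field when a field address was provided.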
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return ZOop::from_address(addr);
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    self_heal(p, addr, good_addr);
  }

  return ZOop::from_address(good_addr);
}

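// Weak variant of barrier(): the fast path returns the good view of an already
// weak good oop, and self healing uses the remapped rather than the marked
// address, since a weak load must never mark an object.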
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    // The slow path returns a good/marked address or null, but we never mark
    // oops in a weak load barrier so we always heal with the remapped address.
    self_heal(p, addr, ZAddress::remapped_or_null(good_addr));
  }

  return ZOop::from_address(good_addr);
}

inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}

//
// Load barrier
//
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}

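// While the reference processor has resurrection blocked, weak and phantom
// loads must not resurrect their referent through the normal load barrier;
// instead the weak slow path is used, which returns null for objects that are
// not otherwise strongly reachable.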
inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

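// The weak_load_barrier_* variants below differ from the load_barrier_*
// variants above only in the non-blocked case, where they fall back to the
// weak oop field barrier instead of the full load barrier.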
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}