/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/narrowptrnode.hpp"
#include "utilities/macros.hpp"

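// BarrierSetC2 provides the default, barrier-free expansion of memory
// accesses into C2's ideal graph; GC-specific barrier sets subclass it
// to wrap these accesses with their read and write barriers.
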
// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }

void* C2Access::barrier_set_state() const {
  return _kit->barrier_set_state();
}

bool C2Access::needs_cpu_membar() const {
  bool mismatched = (_decorators & C2_MISMATCHED) != 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0;
  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  bool in_heap = (_decorators & IN_HEAP) != 0;

  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_atomic = is_read && is_write;

  if (is_atomic) {
    // Atomics always need to be wrapped in CPU membars
    return true;
  }

  if (anonymous) {
    // We will need memory barriers unless we can determine a unique
    // alias category for this reference. (Note: If for some reason
    // the barriers get omitted and the unsafe reference begins to "pollute"
    // the alias analysis of the rest of the graph, either Compile::can_alias
    // or Compile::must_alias will throw a diagnostic assert.)
    if (!in_heap || !is_unordered || (mismatched && !_addr.type()->isa_aryptr())) {
      return true;
    }
  }

  return false;
}

Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;

  bool in_native = (decorators & IN_NATIVE) != 0;
  assert(!in_native, "not supported yet");

  if (access.type() == T_DOUBLE) {
    Node* new_val = kit->dstore_rounding(val.node());
    val.set_node(new_val);
  }

  MemNode::MemOrd mo = access.mem_node_mo();

  Node* store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
                                     access.addr().type(), mo, requires_atomic_access, unaligned, mismatched);
  access.set_raw_access(store);
  return store;
}

Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
  bool pinned = (decorators & C2_PINNED_LOAD) != 0;

  bool in_native = (decorators & IN_NATIVE) != 0;
  assert(!in_native, "not supported yet");

  MemNode::MemOrd mo = access.mem_node_mo();
  LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
  Node* control = control_dependent ? kit->control() : NULL;

  Node* load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                              dep, requires_atomic_access, unaligned, mismatched);
  access.set_raw_access(load);

  return load;
}

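// RAII helper that brackets an access with the memory barriers its
// decorators require: the constructor emits the leading (release-side)
// barriers before the access is generated, and the destructor emits the
// trailing (acquire-side) barriers afterwards.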
class C2AccessFence: public StackObj {
  C2Access& _access;

public:
  C2AccessFence(C2Access& access) :
    _access(access) {
    GraphKit* kit = access.kit();
    DecoratorSet decorators = access.decorators();

    bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
    bool is_read = (decorators & C2_READ_ACCESS) != 0;
    bool is_atomic = is_read && is_write;

    bool is_volatile = (decorators & MO_SEQ_CST) != 0;
    bool is_release = (decorators & MO_RELEASE) != 0;

    if (is_atomic) {
      // Memory-model-wise, a LoadStore acts like a little synchronized
      // block, so needs barriers on each side. These don't translate
      // into actual barriers on most machines, but we still need rest of
      // compiler to respect ordering.
      if (is_release) {
        kit->insert_mem_bar(Op_MemBarRelease);
      } else if (is_volatile) {
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
          kit->insert_mem_bar(Op_MemBarVolatile);
        } else {
          kit->insert_mem_bar(Op_MemBarRelease);
        }
      }
    } else if (is_write) {
      // If reference is volatile, prevent following memory ops from
      // floating down past the volatile write. Also prevents commoning
      // another volatile read.
      if (is_volatile || is_release) {
        kit->insert_mem_bar(Op_MemBarRelease);
      }
    } else {
      // Memory barrier to prevent normal and 'unsafe' accesses from
      // bypassing each other. Happens after null checks, so the
      // exception paths do not take memory state from the memory barrier,
      // so there's no problems making a strong assert about mixing users
      // of safe & unsafe memory.
      if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
        kit->insert_mem_bar(Op_MemBarVolatile);
      }
    }

    if (access.needs_cpu_membar()) {
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    }

    if (is_atomic) {
      // 4984716: MemBars must be inserted before this
      //          memory node in order to avoid a false
      //          dependency which will confuse the scheduler.
      access.set_memory();
    }
  }

  ~C2AccessFence() {
    GraphKit* kit = _access.kit();
    DecoratorSet decorators = _access.decorators();

    bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
    bool is_read = (decorators & C2_READ_ACCESS) != 0;
    bool is_atomic = is_read && is_write;

    bool is_volatile = (decorators & MO_SEQ_CST) != 0;
    bool is_acquire = (decorators & MO_ACQUIRE) != 0;

    // If reference is volatile, prevent following volatile ops from
    // floating up before the volatile access.
    if (_access.needs_cpu_membar()) {
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    }

    if (is_atomic) {
      if (is_acquire || is_volatile) {
        kit->insert_mem_bar(Op_MemBarAcquire);
      }
    } else if (is_write) {
      // If not multiple copy atomic, we do the MemBarVolatile before the load.
      if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
        kit->insert_mem_bar(Op_MemBarVolatile); // Use fat membar
      }
    } else {
      if (is_volatile || is_acquire) {
        kit->insert_mem_bar(Op_MemBarAcquire, _access.raw_access());
      }
    }
  }

};

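// Public entry points: bracket the access with a C2AccessFence, let the
// barrier set resolve the address, then expand the access itself.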
Node* BarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return store_at_resolved(access, val);
}

Node* BarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return load_at_resolved(access, val_type);
}

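// Map the access decorators onto the memory ordering required on the
// underlying ideal memory node.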
MemNode::MemOrd C2Access::mem_node_mo() const {
  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  if ((_decorators & MO_SEQ_CST) != 0) {
    if (is_write && is_read) {
      // For atomic operations
      return MemNode::seqcst;
    } else if (is_write) {
      return MemNode::release;
    } else {
      assert(is_read, "what else?");
      return MemNode::acquire;
    }
  } else if ((_decorators & MO_RELEASE) != 0) {
    return MemNode::release;
  } else if ((_decorators & MO_ACQUIRE) != 0) {
    return MemNode::acquire;
  } else if (is_write) {
    // Volatile fields need releasing stores.
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    // Conservatively release stores of object references.
    return StoreNode::release_if_reference(_type);
  } else {
    return MemNode::unordered;
  }
}

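// Normalize the decorators before the access is expanded: apply the
// default memory ordering, and decide whether an anonymous (Unsafe)
// load really needs to be pinned and control dependent.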
void C2Access::fixup_decorators() {
  bool default_mo = (_decorators & MO_DECORATOR_MASK) == 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0 || default_mo;
  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;

  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;

  if (AlwaysAtomicAccesses && is_unordered) {
    _decorators &= ~MO_DECORATOR_MASK; // clear the MO bits
    _decorators |= MO_RELAXED; // Force the MO_RELAXED decorator with AlwaysAtomicAccess
  }

  _decorators = AccessInternal::decorator_fixup(_decorators);

  if (is_read && !is_write && anonymous) {
    // To be valid, unsafe loads may depend on other conditions than
    // the one that guards them: pin the Load node
    _decorators |= C2_CONTROL_DEPENDENT_LOAD;
    _decorators |= C2_PINNED_LOAD;
    const TypePtr* adr_type = _addr.type();
    Node* adr = _addr.node();
    if (!needs_cpu_membar() && adr_type->isa_instptr()) {
      assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
      intptr_t offset = Type::OffsetBot;
      AddPNode::Ideal_base_and_offset(adr, &_kit->gvn(), offset);
      if (offset >= 0) {
        int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
        if (offset < s) {
          // Guaranteed to be a valid access, no need to pin it
          _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
          _decorators ^= C2_PINNED_LOAD;
        }
      }
    }
  }
}

//--------------------------- atomic operations---------------------------------

static void pin_atomic_op(C2AtomicAccess& access) {
  if (!access.needs_pinning()) {
    return;
  }
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  GraphKit* kit = access.kit();
  Node* load_store = access.raw_access();
  assert(load_store != NULL, "must pin atomic op");
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, access.alias_idx());
}

void C2AtomicAccess::set_memory() {
  Node *mem = _kit->memory(_alias_idx);
  _memory = mem;
}

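// Default expansion of a value-returning compare-and-exchange. With
// compressed oops, oop operands are encoded to narrow oops before the
// exchange and the result is decoded back afterwards.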
Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                   Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  MemNode::MemOrd mo = access.mem_node_mo();
  Node* mem = access.memory();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  Node* load_store = NULL;

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
    } else
#endif
    {
      load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
    }
  } else {
    switch (access.type()) {
      case T_BYTE: {
        load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_SHORT: {
        load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_INT: {
        load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_LONG: {
        load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

#ifdef _LP64
  if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
    return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return load_store;
}

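// Boolean variant: expands to CompareAndSwap (or WeakCompareAndSwap for
// weak CAS requests) nodes that report whether the exchange succeeded.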
Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  DecoratorSet decorators = access.decorators();
  MemNode::MemOrd mo = access.mem_node_mo();
  Node* mem = access.memory();
  bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
  Node* load_store = NULL;
  Node* adr = access.addr().node();

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      }
    } else
#endif
    {
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      }
    }
  } else {
    switch(access.type()) {
      case T_BYTE: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_SHORT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_INT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_LONG: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

  return load_store;
}

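// Default expansion of an atomic exchange (get-and-set); oop values are
// encoded/decoded around the swap when compressed oops are in use.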
Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  Node* mem = access.memory();
  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();
  Node* load_store = NULL;

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
    } else
#endif
    {
      load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
    }
  } else {
    switch (access.type()) {
      case T_BYTE:
        load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_SHORT:
        load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_INT:
        load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_LONG:
        load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

#ifdef _LP64
  if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
    return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return load_store;
}

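// Default expansion of an atomic add (get-and-add); only integral types
// are handled here.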
Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  Node* load_store = NULL;
  GraphKit* kit = access.kit();
  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();
  Node* mem = access.memory();

  switch(access.type()) {
    case T_BYTE:
      load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_SHORT:
      load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_INT:
      load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_LONG:
      load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    default:
      ShouldNotReachHere();
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

  return load_store;
}

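// Entry points for the atomic accesses; they follow the same
// fence / resolve / expand pattern as store_at and load_at above.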
Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
                                          Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
                                           Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* BarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_xchg_at_resolved(access, new_val, value_type);
}

Node* BarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_add_at_resolved(access, new_val, value_type);
}

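// Default clone expansion: copy the payload after the object header in
// 8-byte words using a 'clonebasic' ArrayCopyNode.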
void BarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
  // Exclude the header but include array length to copy by 8 bytes words.
  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude length to copy by 8 bytes words.
      base_off += sizeof(int);
    } else {
      // Include klass to copy by 8 bytes words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
  }
  Node* src_base = kit->basic_plus_adr(src, base_off);
  Node* dst_base = kit->basic_plus_adr(dst, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
  countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong)));

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;

  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
  ac->set_clonebasic();
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
  } else {
    kit->set_all_memory(n);
  }
}