author | roland |
Fri, 28 Sep 2018 10:42:40 +0200 | |
changeset 51984 | 2ef304ee001d |
parent 51880 | ec4c3c287ca7 |
child 52224 | 4f2215a00ed1 |
permissions | -rw-r--r-- |
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
24 |
||
25 |
#ifndef SHARE_GC_SHARED_C2_BARRIERSETC2_HPP |
|
26 |
#define SHARE_GC_SHARED_C2_BARRIERSETC2_HPP |
|
27 |
||
28 |
#include "memory/allocation.hpp" |
|
29 |
#include "oops/accessDecorators.hpp" |
|
30 |
#include "opto/loopnode.hpp" |
|
31 |
#include "opto/memnode.hpp" |
|
32 |
#include "utilities/globalDefinitions.hpp" |
|
33 |
||
34 |
// C2-specific access decorators, continuing the shared DecoratorSet bit
// assignment directly above DECORATOR_LAST (see oops/accessDecorators.hpp).

// This means the access is mismatched: the value of the access is not
// equivalent to the value pointed to by the address.
const DecoratorSet C2_MISMATCHED = DECORATOR_LAST << 1;
// The access may not be aligned to its natural size.
const DecoratorSet C2_UNALIGNED = DECORATOR_LAST << 2;
// The atomic cmpxchg is weak, meaning that spurious false negatives are allowed,
// but never false positives.
const DecoratorSet C2_WEAK_CMPXCHG = DECORATOR_LAST << 3;
// This denotes that a load has a control dependency.
const DecoratorSet C2_CONTROL_DEPENDENT_LOAD = DECORATOR_LAST << 4;
// This denotes a load that must be pinned (may not float in the ideal graph).
const DecoratorSet C2_PINNED_LOAD = DECORATOR_LAST << 5;
// This denotes that the access is produced from the sun.misc.Unsafe intrinsics.
const DecoratorSet C2_UNSAFE_ACCESS = DECORATOR_LAST << 6;
// This denotes that the access mutates state.
const DecoratorSet C2_WRITE_ACCESS = DECORATOR_LAST << 7;
// This denotes that the access reads state.
const DecoratorSet C2_READ_ACCESS = DECORATOR_LAST << 8;
|
52 |
||
53 |
// Forward declarations of C2 compiler types referenced by this interface.
// Note: Type and TypePtr remain incomplete in this header; see the
// reinterpret_casts in C2AccessValuePtr below.
class GraphKit;
class IdealKit;
class Node;
class Type;
class TypePtr;
class PhaseMacroExpand;
|
59 |
||
60 |
// This class wraps a node and a type.
class C2AccessValue: public StackObj {
protected:
  Node* _node;        // the value node of the access
  const Type* _type;  // the compiler type of the value

public:
  C2AccessValue(Node* node, const Type* type) :
    _node(node),
    _type(type) {}

  Node* node() const { return _node; }
  const Type* type() const { return _type; }

  // Replace the wrapped node (the type is left unchanged).
  void set_node(Node* node) { _node = node; }
};
|
76 |
||
77 |
// This class wraps a node and a pointer type.
//
// NOTE: TypePtr is only forward-declared in this header, so the compiler
// cannot see its relationship to Type here; the conversions below therefore
// use reinterpret_cast instead of static_cast. Do not "clean this up" without
// making TypePtr complete first.
class C2AccessValuePtr: public C2AccessValue {

public:
  C2AccessValuePtr(Node* node, const TypePtr* type) :
    C2AccessValue(node, reinterpret_cast<const Type*>(type)) {}

  const TypePtr* type() const { return reinterpret_cast<const TypePtr*>(_type); }
};
|
86 |
||
87 |
// This class wraps a bunch of context parameters that are passed around in the
// BarrierSetC2 backend hierarchy, for loads and stores, to reduce boiler plate.
class C2Access: public StackObj {
protected:
  GraphKit* _kit;           // graph kit the access is emitted into
  DecoratorSet _decorators; // semantic properties of the access (C2_* and shared decorators)
  BasicType _type;          // basic type of the accessed value
  Node* _base;              // base object of the access
  C2AccessValuePtr& _addr;  // address (node + pointer type) being accessed
  Node* _raw_access;        // the raw memory access node, once one has been made

  // Adjusts _decorators after construction (called from the constructor;
  // defined out of line in barrierSetC2.cpp).
  void fixup_decorators();
  // Untyped barrier set state; presumably the per-compilation state created by
  // BarrierSetC2::create_barrier_state() — confirm in the .cpp.
  void* barrier_set_state() const;

public:
  C2Access(GraphKit* kit, DecoratorSet decorators,
           BasicType type, Node* base, C2AccessValuePtr& addr) :
    _kit(kit),
    _decorators(decorators),
    _type(type),
    _base(base),
    _addr(addr),
    _raw_access(NULL)
  {
    fixup_decorators();
  }

  GraphKit* kit() const           { return _kit; }
  DecoratorSet decorators() const { return _decorators; }
  Node* base() const              { return _base; }
  C2AccessValuePtr& addr() const  { return _addr; }
  BasicType type() const          { return _type; }
  bool is_oop() const             { return _type == T_OBJECT || _type == T_ARRAY; }
  bool is_raw() const             { return (_decorators & AS_RAW) != 0; }
  Node* raw_access() const        { return _raw_access; }

  void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
  virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.

  // Memory ordering (MemNode::MemOrd) to use for the raw memory node.
  MemNode::MemOrd mem_node_mo() const;
  // Whether this access requires an explicit CPU membar.
  bool needs_cpu_membar() const;

  // Typed convenience wrapper around barrier_set_state().
  template <typename T>
  T barrier_set_state_as() const {
    return reinterpret_cast<T>(barrier_set_state());
  }
};
|
134 |
||
135 |
// This class wraps a bunch of context parameters that are passed around in the
// BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boiler plate.
class C2AtomicAccess: public C2Access {
  Node* _memory;       // memory node for the current memory slice; NULL until set_memory()
  uint _alias_idx;     // alias index of the accessed memory slice
  bool _needs_pinning; // whether the atomic op should be pinned (see BarrierSetC2::pin_atomic_op);
                       // defaults to true

public:
  C2AtomicAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
                 Node* base, C2AccessValuePtr& addr, uint alias_idx) :
    C2Access(kit, decorators, type, base, addr),
    _memory(NULL),
    _alias_idx(alias_idx),
    _needs_pinning(true) {}

  // Set the memory node based on the current memory slice.
  virtual void set_memory();

  Node* memory() const       { return _memory; }
  uint alias_idx() const     { return _alias_idx; }
  bool needs_pinning() const { return _needs_pinning; }

  void set_needs_pinning(bool value) { _needs_pinning = value; }
};
|
159 |
||
160 |
// This is the top-level class for the backend of the Access API in C2.
// The top-level class is responsible for performing raw accesses. The
// various GC barrier sets inherit from the BarrierSetC2 class to sprinkle
// barriers into the accesses.
class BarrierSetC2: public CHeapObj<mtGC> {
protected:
  // Hooks for accesses whose address has been resolved. The public *_at
  // entry points below are expected to dispatch to these after address
  // resolution — confirm against barrierSetC2.cpp. Subclasses override
  // them to wrap barriers around the raw access.
  virtual void resolve_address(C2Access& access) const;
  virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
  virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;

  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                               Node* new_val, const Type* val_type) const;
  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                Node* new_val, const Type* value_type) const;
  virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const;
  virtual Node* atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const;
  // Pin an atomic operation (see C2AtomicAccess::needs_pinning()).
  void pin_atomic_op(C2AtomicAccess& access) const;

public:
  // This is the entry-point for the backend to perform accesses through the Access API.
  virtual Node* store_at(C2Access& access, C2AccessValue& val) const;
  virtual Node* load_at(C2Access& access, const Type* val_type) const;

  virtual Node* atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
                                      Node* new_val, const Type* val_type) const;
  virtual Node* atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
                                       Node* new_val, const Type* val_type) const;
  virtual Node* atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const;
  virtual Node* atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const;

  // Clone src into dst (is_array distinguishes array from object clones);
  // barrier sets may override to add the barriers cloning requires.
  virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;

  // Resolve a node under the given decorators; the default is the identity.
  virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const { return n; }

  // Hook into object allocation (macro expansion time, given the
  // PhaseMacroExpand parameter), letting a barrier set participate in the
  // expanded allocation path. The Node*& parameters are in-out.
  virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
                             Node*& i_o, Node*& needgc_ctrl,
                             Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
                             intx prefetch_lines) const;

  // These are general helper methods used by C2
  // Compilation phase in which an arraycopy is being considered.
  enum ArrayCopyPhase {
    Parsing,
    Optimization,
    Expansion
  };
  virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const { return false; }

  // Support for GC barriers emitted during parsing
  virtual bool has_load_barriers() const { return false; }
  virtual bool is_gc_barrier_node(Node* node) const { return false; }
  virtual Node* step_over_gc_barrier(Node* c) const { return c; }

  // Support for macro expanded GC barriers
  virtual void register_potential_barrier_node(Node* node) const { }
  virtual void unregister_potential_barrier_node(Node* node) const { }
  virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
  virtual void enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {}
  virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const {}
  virtual void add_users_to_worklist(Unique_Node_List* worklist) const {}

  // Allow barrier sets to have shared state that is preserved across a compilation unit.
  // This could for example comprise macro nodes to be expanded during macro expansion.
  virtual void* create_barrier_state(Arena* comp_arena) const { return NULL; }
  // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
  // expanded later, then now is the time to do so.
  virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }
  // Debug-time sanity checking of emitted barriers; default does nothing.
  virtual void verify_gc_barriers(bool post_parse) const {}
};
|
228 |
||
229 |
#endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP |