40 // The atomic cmpxchg is weak, meaning that spurious false negatives are allowed, |
40 // The atomic cmpxchg is weak, meaning that spurious false negatives are allowed, |
41 // but never false positives. |
41 // but never false positives. |
42 const DecoratorSet C2_WEAK_CMPXCHG = DECORATOR_LAST << 3; |
42 const DecoratorSet C2_WEAK_CMPXCHG = DECORATOR_LAST << 3; |
43 // This denotes that a load has control dependency. |
43 // This denotes that a load has control dependency. |
44 const DecoratorSet C2_CONTROL_DEPENDENT_LOAD = DECORATOR_LAST << 4; |
44 const DecoratorSet C2_CONTROL_DEPENDENT_LOAD = DECORATOR_LAST << 4; |
45 // This denotes a load that must be pinned. |
45 // This denotes a load that must be pinned, but may float above safepoints. |
46 const DecoratorSet C2_PINNED_LOAD = DECORATOR_LAST << 5; |
46 const DecoratorSet C2_UNKNOWN_CONTROL_LOAD = DECORATOR_LAST << 5; |
47 // This denotes that the access is produced from the sun.misc.Unsafe intrinsics. |
47 // This denotes that the access is produced from the sun.misc.Unsafe intrinsics. |
48 const DecoratorSet C2_UNSAFE_ACCESS = DECORATOR_LAST << 6; |
48 const DecoratorSet C2_UNSAFE_ACCESS = DECORATOR_LAST << 6; |
49 // This denotes that the access mutates state. |
49 // This denotes that the access mutates state. |
50 const DecoratorSet C2_WRITE_ACCESS = DECORATOR_LAST << 7; |
50 const DecoratorSet C2_WRITE_ACCESS = DECORATOR_LAST << 7; |
51 // This denotes that the access reads state. |
51 // This denotes that the access reads state. |
118 |
118 |
119 DecoratorSet decorators() const { return _decorators; } |
119 DecoratorSet decorators() const { return _decorators; } |
120 Node* base() const { return _base; } |
120 Node* base() const { return _base; } |
121 C2AccessValuePtr& addr() const { return _addr; } |
121 C2AccessValuePtr& addr() const { return _addr; } |
122 BasicType type() const { return _type; } |
122 BasicType type() const { return _type; } |
123 bool is_oop() const { return _type == T_OBJECT || _type == T_ARRAY; } |
123 bool is_oop() const { return is_reference_type(_type); } |
124 bool is_raw() const { return (_decorators & AS_RAW) != 0; } |
124 bool is_raw() const { return (_decorators & AS_RAW) != 0; } |
125 Node* raw_access() const { return _raw_access; } |
125 Node* raw_access() const { return _raw_access; } |
126 |
126 |
127 void set_raw_access(Node* raw_access) { _raw_access = raw_access; } |
127 void set_raw_access(Node* raw_access) { _raw_access = raw_access; } |
128 virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses. |
128 virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses. |
257 enum ArrayCopyPhase { |
257 enum ArrayCopyPhase { |
258 Parsing, |
258 Parsing, |
259 Optimization, |
259 Optimization, |
260 Expansion |
260 Expansion |
261 }; |
261 }; |
|
262 |
262 virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const { return false; } |
263 virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const { return false; } |
263 virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const; |
264 virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const; |
264 |
265 |
265 // Support for GC barriers emitted during parsing |
266 // Support for GC barriers emitted during parsing |
266 virtual bool has_load_barriers() const { return false; } |
267 virtual bool has_load_barrier_nodes() const { return false; } |
267 virtual bool is_gc_barrier_node(Node* node) const { return false; } |
268 virtual bool is_gc_barrier_node(Node* node) const { return false; } |
268 virtual Node* step_over_gc_barrier(Node* c) const { return c; } |
269 virtual Node* step_over_gc_barrier(Node* c) const { return c; } |
|
270 virtual Node* step_over_gc_barrier_ctrl(Node* c) const { return c; } |
269 |
271 |
270 // Support for macro expanded GC barriers |
272 // Support for macro expanded GC barriers |
271 virtual void register_potential_barrier_node(Node* node) const { } |
273 virtual void register_potential_barrier_node(Node* node) const { } |
272 virtual void unregister_potential_barrier_node(Node* node) const { } |
274 virtual void unregister_potential_barrier_node(Node* node) const { } |
273 virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { } |
275 virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { } |
274 virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {} |
276 virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {} |
275 virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {} |
277 virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {} |
276 virtual void add_users_to_worklist(Unique_Node_List* worklist) const {} |
|
277 |
278 |
278 // Allow barrier sets to have shared state that is preserved across a compilation unit. |
279 // Allow barrier sets to have shared state that is preserved across a compilation unit. |
279 // This could for example comprise macro nodes to be expanded during macro expansion. |
280 // This could for example comprise macro nodes to be expanded during macro expansion. |
280 virtual void* create_barrier_state(Arena* comp_arena) const { return NULL; } |
281 virtual void* create_barrier_state(Arena* comp_arena) const { return NULL; } |
281 // If the BarrierSetC2 state has barrier nodes in its compilation |
282 // If the BarrierSetC2 state has barrier nodes in its compilation |
286 virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; } |
287 virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; } |
287 |
288 |
288 virtual bool has_special_unique_user(const Node* node) const { return false; } |
289 virtual bool has_special_unique_user(const Node* node) const { return false; } |
289 |
290 |
290 enum CompilePhase { |
291 enum CompilePhase { |
291 BeforeOptimize, /* post_parse = true */ |
292 BeforeOptimize, |
292 BeforeExpand, /* post_parse = false */ |
293 BeforeMacroExpand, |
293 BeforeCodeGen |
294 BeforeCodeGen |
294 }; |
295 }; |
295 virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const {} |
|
296 |
296 |
297 virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const { return false; } |
297 virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const { return false; } |
298 #ifdef ASSERT |
298 #ifdef ASSERT |
299 virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const { return false; } |
299 virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const { return false; } |
|
300 virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const {} |
300 #endif |
301 #endif |
301 |
302 |
302 virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const { return false; } |
303 virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const { return false; } |
303 |
304 |
304 virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const { return false; } |
305 virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const { return false; } |
305 virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const { return false; } |
306 virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const { return false; } |
306 virtual bool escape_has_out_with_unsafe_object(Node* n) const { return false; } |
307 virtual bool escape_has_out_with_unsafe_object(Node* n) const { return false; } |
307 virtual bool escape_is_barrier_node(Node* n) const { return false; } |
|
308 |
308 |
309 virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const { return false; }; |
309 virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const { return false; }; |
310 virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const { return false; }; |
310 virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const { return false; }; |
311 virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const { return false; } |
311 virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const { return false; } |
312 |
312 |
313 virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const {} |
313 virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const { } |
314 virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {} |
314 virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const { } |
315 |
315 |
316 virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const { return NULL; } |
316 virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const { return NULL; } |
317 virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const { return false; } |
317 virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const { return false; } |
318 virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const { return false; } |
318 virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const { return false; } |
|
319 |
|
320 virtual void late_barrier_analysis() const { } |
|
321 virtual int estimate_stub_size() const { return 0; } |
|
322 virtual void emit_stubs(CodeBuffer& cb) const { } |
319 }; |
323 }; |
320 |
324 |
321 #endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP |
325 #endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP |