--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp Wed Nov 14 23:12:19 2018 -0800
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp Thu Nov 15 10:59:56 2018 +0100
@@ -31,6 +31,7 @@
#include "gc/g1/heapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/compile.hpp"
+#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
@@ -840,3 +841,32 @@
}
}
#endif
+
+bool G1BarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
+ if (opcode == Op_StoreP) {
+ Node* adr = n->in(MemNode::Address);
+ const Type* adr_type = gvn->type(adr);
+    // Pointer stores in G1 barriers look like unsafe accesses.
+    // Ignore such stores so that non-escaping allocations can
+    // still be scalar-replaced.
+ if (adr_type->isa_rawptr() && adr->is_AddP()) {
+ Node* base = conn_graph->get_addp_base(adr);
+ if (base->Opcode() == Op_LoadP &&
+ base->in(MemNode::Address)->is_AddP()) {
+ adr = base->in(MemNode::Address);
+ Node* tls = conn_graph->get_addp_base(adr);
+ if (tls->Opcode() == Op_ThreadLocal) {
+ int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
+ const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
+ if (offs == buf_offset) {
+ return true; // G1 pre barrier previous oop value store.
+ }
+ if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
+ return true; // G1 post barrier card address store.
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
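
For reference, the ideal-graph shape this matcher walks is roughly (a sketch,
not literal compiler output; AddP input slots abbreviated):

    StoreP(adr, value)                     // raw store being classified
      adr = AddP(buf, buf, index)          //   slot within a queue buffer
      buf = LoadP(AddP(tls, tls, offset))  //   buffer pointer loaded from TLS
      tls = ThreadLocal                    //   current thread

with offset equal to satb_mark_queue_buffer_offset() for a pre-barrier enqueue
of the previous oop value, or dirty_card_queue_buffer_offset() for a
post-barrier card address store. Neither store publishes the oop, so treating
it as an escape would needlessly block scalar replacement.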
--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp Wed Nov 14 23:12:19 2018 -0800
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp Thu Nov 15 10:59:56 2018 +0100
@@ -92,6 +92,8 @@
#ifdef ASSERT
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
#endif
+
+ virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
};
#endif // SHARE_GC_SHARED_C2_G1BARRIERSETC2_HPP
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp Wed Nov 14 23:12:19 2018 -0800
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp Thu Nov 15 10:59:56 2018 +0100
@@ -55,12 +55,15 @@
const DecoratorSet C2_ARRAY_COPY = DECORATOR_LAST << 10;
class Compile;
+class ConnectionGraph;
class GraphKit;
class IdealKit;
class Node;
+class PhaseGVN;
+class PhaseMacroExpand;
class Type;
class TypePtr;
-class PhaseMacroExpand;
+class Unique_Node_List;
// This class wraps a node and a type.
class C2AccessValue: public StackObj {
@@ -291,6 +294,10 @@
virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const { return false; }
+ virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const { return false; }
+ virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const { return false; }
+ virtual bool escape_has_out_with_unsafe_object(Node* n) const { return false; }
+ virtual bool escape_is_barrier_node(Node* n) const { return false; }
};
#endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP
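
The contract for the two escape_add_* hooks: a GC claims a node by returning
true, which makes ConnectionGraph skip its default handling; returning false
falls through to the shared code in escape.cpp. The two escape_* predicates
are instead OR-ed into existing checks (see the escape.cpp hunks below). A
minimal sketch of a hypothetical backend (Op_MyBarrier is an invented opcode,
used only to illustrate the dispatch):

    class MyBarrierSetC2 : public BarrierSetC2 {
      virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn,
                                           Unique_Node_List* delayed_worklist,
                                           Node* n, uint opcode) const {
        if (opcode == Op_MyBarrier) {
          // Treat the barrier's result like an object load; its points-to
          // edges are completed in the second (final-edges) pass.
          conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
          return true;  // claimed: escape.cpp will not process this node
        }
        return false;   // not ours: default handling applies
      }
    };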
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Wed Nov 14 23:12:19 2018 -0800
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Thu Nov 15 10:59:56 2018 +0100
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "opto/compile.hpp"
#include "opto/castnode.hpp"
+#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/loopnode.hpp"
@@ -1571,3 +1572,47 @@
}
#endif
+
+bool ZBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
+ switch (opcode) {
+ case Op_LoadBarrierSlowReg:
+ case Op_LoadBarrierWeakSlowReg:
+ conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
+ return true;
+ case Op_Proj:
+ if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
+ conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop),
+ delayed_worklist);
+ return true;
+      }
+      break;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool ZBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
+ switch (opcode) {
+ case Op_LoadBarrierSlowReg:
+ case Op_LoadBarrierWeakSlowReg: {
+ const Type *t = gvn->type(n);
+ if (t->make_ptr() != NULL) {
+ Node *adr = n->in(MemNode::Address);
+ conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
+ return true;
+      }
+      break;
+    }
+ case Op_Proj: {
+ if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
+ conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
+ return true;
+      }
+      break;
+    }
+ default:
+ break;
+ }
+ return false;
+}
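
For reference, the Op_Proj pattern matched above (a sketch; LoadBarrierNode's
input list is abbreviated):

    LoadBarrier(ctrl, mem, oop_in, adr, ...)
      +-- Proj#Oop  -->  forwards the loaded oop, so it is modeled as a
                         NoEscape local var with an edge to oop_in

Users of the Oop projection observe the same object as the barrier's Oop
input, so wiring the projection to oop_in keeps points-to information flowing
through the barrier.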
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp Wed Nov 14 23:12:19 2018 -0800
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp Thu Nov 15 10:59:56 2018 +0100
@@ -212,6 +212,9 @@
#ifdef ASSERT
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
#endif
+
+ virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
+ virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const;
};
#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
--- a/src/hotspot/share/opto/escape.cpp Wed Nov 14 23:12:19 2018 -0800
+++ b/src/hotspot/share/opto/escape.cpp Thu Nov 15 10:59:56 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
+#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
@@ -39,12 +40,6 @@
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_G1GC
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_G1GC
-#if INCLUDE_ZGC
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#endif
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
@@ -388,6 +383,10 @@
return; // Skip predefined nodes.
int opcode = n->Opcode();
+ bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
+ if (gc_handled) {
+ return; // Ignore node if already handled by GC.
+ }
switch (opcode) {
case Op_AddP: {
Node* base = get_addp_base(n);
@@ -453,10 +452,6 @@
break;
}
case Op_LoadP:
-#if INCLUDE_ZGC
- case Op_LoadBarrierSlowReg:
- case Op_LoadBarrierWeakSlowReg:
-#endif
case Op_LoadN:
case Op_LoadPLocked: {
add_objload_to_connection_graph(n, delayed_worklist);
@@ -491,13 +486,6 @@
add_local_var_and_edge(n, PointsToNode::NoEscape,
n->in(0), delayed_worklist);
}
-#if INCLUDE_ZGC
- else if (UseZGC) {
- if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
- add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
- }
- }
-#endif
break;
}
case Op_Rethrow: // Exception object escapes
@@ -525,62 +513,7 @@
case Op_WeakCompareAndSwapN:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN: {
- Node* adr = n->in(MemNode::Address);
- const Type *adr_type = igvn->type(adr);
- adr_type = adr_type->make_ptr();
- if (adr_type == NULL) {
- break; // skip dead nodes
- }
- if ( adr_type->isa_oopptr()
- || ( (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
- && adr_type == TypeRawPtr::NOTNULL
- && adr->in(AddPNode::Address)->is_Proj()
- && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
- delayed_worklist->push(n); // Process it later.
-#ifdef ASSERT
- assert(adr->is_AddP(), "expecting an AddP");
- if (adr_type == TypeRawPtr::NOTNULL) {
- // Verify a raw address for a store captured by Initialize node.
- int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
- assert(offs != Type::OffsetBot, "offset must be a constant");
- }
-#endif
- } else {
- // Ignore copy the displaced header to the BoxNode (OSR compilation).
- if (adr->is_BoxLock())
- break;
- // Stored value escapes in unsafe access.
- if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
- // Pointer stores in G1 barriers looks like unsafe access.
- // Ignore such stores to be able scalar replace non-escaping
- // allocations.
-#if INCLUDE_G1GC
- if (UseG1GC && adr->is_AddP()) {
- Node* base = get_addp_base(adr);
- if (base->Opcode() == Op_LoadP &&
- base->in(MemNode::Address)->is_AddP()) {
- adr = base->in(MemNode::Address);
- Node* tls = get_addp_base(adr);
- if (tls->Opcode() == Op_ThreadLocal) {
- int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
- if (offs == in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset())) {
- break; // G1 pre barrier previous oop value store.
- }
- if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
- break; // G1 post barrier card address store.
- }
- }
- }
- }
-#endif
- delayed_worklist->push(n); // Process unsafe access later.
- break;
- }
-#ifdef ASSERT
- n->dump(1);
- assert(false, "not unsafe or G1 barrier raw StoreP");
-#endif
- }
+ add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
break;
}
case Op_AryEq:
@@ -633,6 +566,10 @@
(n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
"node should be registered already");
int opcode = n->Opcode();
+ bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
+ if (gc_handled) {
+ return; // Ignore node if already handled by GC.
+ }
switch (opcode) {
case Op_AddP: {
Node* base = get_addp_base(n);
@@ -666,10 +603,6 @@
break;
}
case Op_LoadP:
-#if INCLUDE_ZGC
- case Op_LoadBarrierSlowReg:
- case Op_LoadBarrierWeakSlowReg:
-#endif
case Op_LoadN:
case Op_LoadPLocked: {
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
@@ -709,14 +642,6 @@
add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
break;
}
-#if INCLUDE_ZGC
- else if (UseZGC) {
- if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
- add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
- break;
- }
- }
-#endif
ELSE_FAIL("Op_Proj");
}
case Op_Rethrow: // Exception object escapes
@@ -742,47 +667,7 @@
case Op_WeakCompareAndSwapN:
case Op_GetAndSetP:
case Op_GetAndSetN: {
- Node* adr = n->in(MemNode::Address);
- const Type *adr_type = _igvn->type(adr);
- adr_type = adr_type->make_ptr();
-#ifdef ASSERT
- if (adr_type == NULL) {
- n->dump(1);
- assert(adr_type != NULL, "dead node should not be on list");
- break;
- }
-#endif
- if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
- opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
- add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
- }
- if ( adr_type->isa_oopptr()
- || ( (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
- && adr_type == TypeRawPtr::NOTNULL
- && adr->in(AddPNode::Address)->is_Proj()
- && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
- // Point Address to Value
- PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
- assert(adr_ptn != NULL &&
- adr_ptn->as_Field()->is_oop(), "node should be registered");
- Node *val = n->in(MemNode::ValueIn);
- PointsToNode* ptn = ptnode_adr(val->_idx);
- assert(ptn != NULL, "node should be registered");
- add_edge(adr_ptn, ptn);
- break;
- } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
- // Stored value escapes in unsafe access.
- Node *val = n->in(MemNode::ValueIn);
- PointsToNode* ptn = ptnode_adr(val->_idx);
- assert(ptn != NULL, "node should be registered");
- set_escape_state(ptn, PointsToNode::GlobalEscape);
- // Add edge to object for unsafe access with offset.
- PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
- assert(adr_ptn != NULL, "node should be registered");
- if (adr_ptn->is_Field()) {
- assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
- add_edge(adr_ptn, ptn);
- }
+ if (add_final_edges_unsafe_access(n, opcode)) {
break;
}
ELSE_FAIL("Op_StoreP");
@@ -827,6 +712,93 @@
return;
}
+void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
+ Node* adr = n->in(MemNode::Address);
+ const Type* adr_type = _igvn->type(adr);
+ adr_type = adr_type->make_ptr();
+ if (adr_type == NULL) {
+ return; // skip dead nodes
+ }
+ if (adr_type->isa_oopptr()
+ || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
+ && adr_type == TypeRawPtr::NOTNULL
+ && adr->in(AddPNode::Address)->is_Proj()
+ && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
+ delayed_worklist->push(n); // Process it later.
+#ifdef ASSERT
+    assert(adr->is_AddP(), "expecting an AddP");
+ if (adr_type == TypeRawPtr::NOTNULL) {
+ // Verify a raw address for a store captured by Initialize node.
+ int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
+ assert(offs != Type::OffsetBot, "offset must be a constant");
+ }
+#endif
+ } else {
+    // Ignore a copy of the displaced header to the BoxNode (OSR compilation).
+ if (adr->is_BoxLock()) {
+ return;
+ }
+ // Stored value escapes in unsafe access.
+ if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
+ delayed_worklist->push(n); // Process unsafe access later.
+ return;
+ }
+#ifdef ASSERT
+ n->dump(1);
+ assert(false, "not unsafe");
+#endif
+ }
+}
+
+bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
+ Node* adr = n->in(MemNode::Address);
+ const Type *adr_type = _igvn->type(adr);
+ adr_type = adr_type->make_ptr();
+#ifdef ASSERT
+ if (adr_type == NULL) {
+ n->dump(1);
+ assert(adr_type != NULL, "dead node should not be on list");
+ return true;
+ }
+#endif
+
+ if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
+ opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
+ add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
+ }
+
+ if (adr_type->isa_oopptr()
+ || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
+ && adr_type == TypeRawPtr::NOTNULL
+ && adr->in(AddPNode::Address)->is_Proj()
+ && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
+ // Point Address to Value
+ PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
+ assert(adr_ptn != NULL &&
+ adr_ptn->as_Field()->is_oop(), "node should be registered");
+ Node* val = n->in(MemNode::ValueIn);
+ PointsToNode* ptn = ptnode_adr(val->_idx);
+ assert(ptn != NULL, "node should be registered");
+ add_edge(adr_ptn, ptn);
+ return true;
+ } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
+ // Stored value escapes in unsafe access.
+ Node* val = n->in(MemNode::ValueIn);
+ PointsToNode* ptn = ptnode_adr(val->_idx);
+ assert(ptn != NULL, "node should be registered");
+ set_escape_state(ptn, PointsToNode::GlobalEscape);
+ // Add edge to object for unsafe access with offset.
+ PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
+ assert(adr_ptn != NULL, "node should be registered");
+ if (adr_ptn->is_Field()) {
+ assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
+ add_edge(adr_ptn, ptn);
+ }
+ return true;
+ }
+ return false;
+}
+
void ConnectionGraph::add_call_node(CallNode* call) {
assert(call->returns_pointer(), "only for call which returns pointer");
uint call_idx = call->_idx;
@@ -2100,7 +2072,8 @@
// Check for unsafe oop field access
if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
- n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
+ n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
+ BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
bt = T_OBJECT;
(*unsafe) = true;
}
@@ -2118,7 +2091,8 @@
// Allocation initialization, ThreadLocal field access, unsafe access
if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
- n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN)) {
+ n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
+ BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
bt = T_OBJECT;
}
}
@@ -2359,7 +2333,8 @@
assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
(uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
- (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
+ (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) ||
+ BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(uncast_base), "sanity");
}
}
return base;
@@ -3092,6 +3067,7 @@
n->is_CheckCastPP() ||
n->is_EncodeP() ||
n->is_DecodeN() ||
+ BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(n) ||
(n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
if (visited.test_set(n->_idx)) {
assert(n->is_Phi(), "loops only through Phi's");
@@ -3162,6 +3138,7 @@
use->is_CheckCastPP() ||
use->is_EncodeNarrowPtr() ||
use->is_DecodeNarrowPtr() ||
+ BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(use) ||
(use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
alloc_worklist.append_if_missing(use);
#ifdef ASSERT
@@ -3564,3 +3541,8 @@
}
}
#endif
+
+void ConnectionGraph::record_for_optimizer(Node *n) {
+ _igvn->_worklist.push(n);
+ _igvn->add_users_to_worklist(n);
+}
--- a/src/hotspot/share/opto/escape.hpp Wed Nov 14 23:12:19 2018 -0800
+++ b/src/hotspot/share/opto/escape.hpp Thu Nov 15 10:59:56 2018 +0100
@@ -371,8 +371,6 @@
_nodes.at_put(n->_idx, ptn);
}
- // Utility function for nodes that load an object
- void add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist);
// Create PointsToNode node and add it to Connection Graph.
void add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist);
@@ -511,24 +509,8 @@
return is_new;
}
- // Add LocalVar node and edge if possible
- void add_local_var_and_edge(Node* n, PointsToNode::EscapeState es, Node* to,
- Unique_Node_List *delayed_worklist) {
- PointsToNode* ptn = ptnode_adr(to->_idx);
- if (delayed_worklist != NULL) { // First iteration of CG construction
- add_local_var(n, es);
- if (ptn == NULL) {
- delayed_worklist->push(n);
- return; // Process it later.
- }
- } else {
- assert(ptn != NULL, "node should be registered");
- }
- add_edge(ptnode_adr(n->_idx), ptn);
- }
// Helper functions
bool is_oop_field(Node* n, int offset, bool* unsafe);
- Node* get_addp_base(Node *addp);
static Node* find_second_addp(Node* addp, Node* n);
// offset of a field reference
int address_offset(Node* adr, PhaseTransform *phase);
@@ -574,10 +556,7 @@
}
// Notify optimizer that a node has been modified
- void record_for_optimizer(Node *n) {
- _igvn->_worklist.push(n);
- _igvn->add_users_to_worklist(n);
- }
+ void record_for_optimizer(Node *n);
// Compute the escape information
bool compute_escape();
@@ -593,6 +572,31 @@
bool not_global_escape(Node *n);
+ // To be used by, e.g., BarrierSetC2 impls
+ Node* get_addp_base(Node* addp);
+
+ // Utility function for nodes that load an object
+ void add_objload_to_connection_graph(Node* n, Unique_Node_List* delayed_worklist);
+
+ // Add LocalVar node and edge if possible
+ void add_local_var_and_edge(Node* n, PointsToNode::EscapeState es, Node* to,
+ Unique_Node_List *delayed_worklist) {
+ PointsToNode* ptn = ptnode_adr(to->_idx);
+ if (delayed_worklist != NULL) { // First iteration of CG construction
+ add_local_var(n, es);
+ if (ptn == NULL) {
+ delayed_worklist->push(n);
+ return; // Process it later.
+ }
+ } else {
+ assert(ptn != NULL, "node should be registered");
+ }
+ add_edge(ptnode_adr(n->_idx), ptn);
+ }
+
+ void add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist);
+ bool add_final_edges_unsafe_access(Node* n, uint opcode);
+
#ifndef PRODUCT
void dump(GrowableArray<PointsToNode*>& ptnodes_worklist);
#endif