src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
changeset 52424 e3d79743f57d
parent 51984 2ef304ee001d
child 53765 e002408eb0c0
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp	Tue Nov 06 15:23:52 2018 +0800
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp	Tue Nov 06 10:01:27 2018 +0100
@@ -35,10 +35,12 @@
 // By default this is a no-op.
 void BarrierSetC2::resolve_address(C2Access& access) const { }
 
-void* C2Access::barrier_set_state() const {
+void* C2ParseAccess::barrier_set_state() const {
   return _kit->barrier_set_state();
 }
 
+PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
+
 bool C2Access::needs_cpu_membar() const {
   bool mismatched = (_decorators & C2_MISMATCHED) != 0;
   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
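
For orientation: the class split this changeset relies on lives in the companion
barrierSetC2.hpp change, which is not part of this file. Roughly, and only as a
sketch (the accessors used in this file are certain, the exact member layout is
an assumption):

    class C2Access {
      // decorators, type, base, address, raw access;
      // is_parse_access() / is_opt_access() discriminators
    };
    class C2ParseAccess : public C2Access {  // parse time: wraps a GraphKit
      GraphKit* _kit;
    public:
      GraphKit* kit() const { return _kit; }
      PhaseGVN& gvn() const;                 // forwards to _kit->gvn(), as above
    };
    class C2OptAccess : public C2Access {    // optimization time: no GraphKit
      PhaseGVN&     _gvn;
      Node*         _ctl;
      MergeMemNode* _mem;
    public:
      PhaseGVN& gvn() const     { return _gvn; }
      Node* ctl() const         { return _ctl; }
      MergeMemNode* mem() const { return _mem; }
    };
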
@@ -70,7 +72,6 @@
 
 Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
   DecoratorSet decorators = access.decorators();
-  GraphKit* kit = access.kit();
 
   bool mismatched = (decorators & C2_MISMATCHED) != 0;
   bool unaligned = (decorators & C2_UNALIGNED) != 0;
@@ -79,22 +80,49 @@
   bool in_native = (decorators & IN_NATIVE) != 0;
   assert(!in_native, "not supported yet");
 
-  if (access.type() == T_DOUBLE) {
-    Node* new_val = kit->dstore_rounding(val.node());
-    val.set_node(new_val);
-  }
-
   MemNode::MemOrd mo = access.mem_node_mo();
 
-  Node* store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
+  Node* store;
+  if (access.is_parse_access()) {
+    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+
+    GraphKit* kit = parse_access.kit();
+    if (access.type() == T_DOUBLE) {
+      Node* new_val = kit->dstore_rounding(val.node());
+      val.set_node(new_val);
+    }
+
+    store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
                                      access.addr().type(), mo, requires_atomic_access, unaligned, mismatched);
-  access.set_raw_access(store);
+    access.set_raw_access(store);
+  } else {
+    assert(!requires_atomic_access, "not yet supported");
+    assert(access.is_opt_access(), "either parse or opt access");
+    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
+    Node* ctl = opt_access.ctl();
+    MergeMemNode* mm = opt_access.mem();
+    PhaseGVN& gvn = opt_access.gvn();
+    const TypePtr* adr_type = access.addr().type();
+    int alias = gvn.C->get_alias_index(adr_type);
+    Node* mem = mm->memory_at(alias);
+
+    StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), access.type(), mo);
+    if (unaligned) {
+      st->set_unaligned_access();
+    }
+    if (mismatched) {
+      st->set_mismatched_access();
+    }
+    store = gvn.transform(st);
+    if (store == st) {
+      mm->set_memory_at(alias, st);
+    }
+  }
   return store;
 }
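
The new else branch is what makes stores emittable after parsing: with no
GraphKit available, the caller supplies control, a MergeMemNode and a PhaseGVN
directly, and the slice for the address's alias class is written back only when
the freshly made StoreNode survives GVN (store == st); otherwise an identical
store was commoned and the memory chain is already correct. A minimal caller
sketch, assuming the C2OptAccess constructor shape implied by the accessors
used above:

    C2AccessValuePtr addr(adr, adr_type);
    C2OptAccess access(gvn, ctl, mergemem, IN_HEAP | C2_WRITE_ACCESS,
                       T_OBJECT, base, addr);          // ctor shape assumed
    C2AccessValue value(val, TypeOopPtr::BOTTOM);
    BarrierSet::barrier_set()->barrier_set_c2()->store_at(access, value);
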
 
 Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
   DecoratorSet decorators = access.decorators();
-  GraphKit* kit = access.kit();
 
   Node* adr = access.addr().node();
   const TypePtr* adr_type = access.addr().type();
@@ -109,16 +137,31 @@
 
   MemNode::MemOrd mo = access.mem_node_mo();
   LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
-  Node* control = control_dependent ? kit->control() : NULL;
 
   Node* load;
-  if (in_native) {
-    load = kit->make_load(control, adr, val_type, access.type(), mo);
+  if (access.is_parse_access()) {
+    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+    GraphKit* kit = parse_access.kit();
+    Node* control = control_dependent ? kit->control() : NULL;
+
+    if (in_native) {
+      load = kit->make_load(control, adr, val_type, access.type(), mo);
+    } else {
+      load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
+                            dep, requires_atomic_access, unaligned, mismatched);
+    }
+    access.set_raw_access(load);
   } else {
-    load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
-                          dep, requires_atomic_access, unaligned, mismatched);
+    assert(!requires_atomic_access, "not yet supported");
+    assert(access.is_opt_access(), "either parse or opt access");
+    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
+    Node* control = control_dependent ? opt_access.ctl() : NULL;
+    MergeMemNode* mm = opt_access.mem();
+    PhaseGVN& gvn = opt_access.gvn();
+    Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
+    load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep, unaligned, mismatched);
+    load = gvn.transform(load);
   }
-  access.set_raw_access(load);
 
   return load;
 }
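
Note the asymmetry with the store branch: the optimization-time load path reads
the memory slice for the address's alias class but never calls set_memory_at,
since a load consumes memory state without producing a new one. The slice
lookup is the same two calls:

    int alias = gvn.C->get_alias_index(adr_type);  // alias class of the access
    Node* mem = mm->memory_at(alias);              // input slice; nothing to write back
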
@@ -130,7 +173,11 @@
 public:
   C2AccessFence(C2Access& access) :
     _access(access), _leading_membar(NULL) {
-    GraphKit* kit = access.kit();
+    GraphKit* kit = NULL;
+    if (access.is_parse_access()) {
+      C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+      kit = parse_access.kit();
+    }
     DecoratorSet decorators = access.decorators();
 
     bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
@@ -141,6 +188,7 @@
     bool is_release = (decorators & MO_RELEASE) != 0;
 
     if (is_atomic) {
+      assert(kit != NULL, "unsupported at optimization time");
       // Memory-model-wise, a LoadStore acts like a little synchronized
       // block, so needs barriers on each side.  These don't translate
       // into actual barriers on most machines, but we still need rest of
@@ -159,6 +207,7 @@
       // floating down past the volatile write.  Also prevents commoning
       // another volatile read.
       if (is_volatile || is_release) {
+        assert(kit != NULL, "unsupported at optimization time");
         _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
       }
     } else {
@@ -168,11 +217,13 @@
       // so there's no problems making a strong assert about mixing users
       // of safe & unsafe memory.
       if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
+        assert(kit != NULL, "unsupported at optimization time");
         _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
       }
     }
 
     if (access.needs_cpu_membar()) {
+      assert(kit != NULL, "unsupported at optimization time");
       kit->insert_mem_bar(Op_MemBarCPUOrder);
     }
 
@@ -185,7 +236,11 @@
   }
 
   ~C2AccessFence() {
-    GraphKit* kit = _access.kit();
+    GraphKit* kit = NULL;
+    if (_access.is_parse_access()) {
+      C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(_access);
+      kit = parse_access.kit();
+    }
     DecoratorSet decorators = _access.decorators();
 
     bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
@@ -202,6 +257,7 @@
     }
 
     if (is_atomic) {
+      assert(kit != NULL, "unsupported at optimization time");
       if (is_acquire || is_volatile) {
         Node* n = _access.raw_access();
         Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
@@ -212,6 +268,7 @@
     } else if (is_write) {
       // If not multiple copy atomic, we do the MemBarVolatile before the load.
       if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
+        assert(kit != NULL, "unsupported at optimization time");
         Node* n = _access.raw_access();
         Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
         if (_leading_membar != NULL) {
@@ -220,6 +277,7 @@
       }
     } else {
       if (is_volatile || is_acquire) {
+        assert(kit != NULL, "unsupported at optimization time");
         Node* n = _access.raw_access();
         assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
         Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
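
Taken together, the constructor/destructor pair brackets the access with the
following fences (summarizing the branches visible above and below):

    // volatile store: MemBarRelease; store; MemBarVolatile  (trailing fat membar
    //                 only when !support_IRIW_for_not_multiple_copy_atomic_cpu)
    // volatile load:  MemBarVolatile (only when support_IRIW_...); load; MemBarAcquire
    // atomic RMW:     MemBarRelease (release/volatile); LoadStore;
    //                 MemBarAcquire (acquire/volatile)

The new kit != NULL asserts encode that none of these fences can currently be
emitted at optimization time, since insert_mem_bar is a GraphKit operation.
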
@@ -295,7 +353,7 @@
     if (!needs_cpu_membar() && adr_type->isa_instptr()) {
       assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
       intptr_t offset = Type::OffsetBot;
-      AddPNode::Ideal_base_and_offset(adr, &_kit->gvn(), offset);
+      AddPNode::Ideal_base_and_offset(adr, &gvn(), offset);
       if (offset >= 0) {
         int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
         if (offset < s) {
@@ -310,26 +368,28 @@
 
 //--------------------------- atomic operations---------------------------------
 
-void BarrierSetC2::pin_atomic_op(C2AtomicAccess& access) const {
+void BarrierSetC2::pin_atomic_op(C2AtomicParseAccess& access) const {
   if (!access.needs_pinning()) {
     return;
   }
   // SCMemProjNodes represent the memory state of a LoadStore. Their
   // main role is to prevent LoadStore nodes from being optimized away
   // when their results aren't used.
-  GraphKit* kit = access.kit();
+  assert(access.is_parse_access(), "entry not supported at optimization time");
+  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+  GraphKit* kit = parse_access.kit();
   Node* load_store = access.raw_access();
   assert(load_store != NULL, "must pin atomic op");
   Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
   kit->set_memory(proj, access.alias_idx());
 }
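
The comment above is the whole story of SCMemProj: a LoadStore node produces
both a value and a memory state, and projecting out the memory side keeps the
node rooted in the memory graph even when its value result goes unused.
Schematically (the concrete LoadStore node class varies per operation, so it is
elided here):

    // load_store = kit->gvn().transform(new SomeLoadStoreNode(...));
    // Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
    // kit->set_memory(proj, alias_idx);  // memory chain now depends on the RMW
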
 
-void C2AtomicAccess::set_memory() {
+void C2AtomicParseAccess::set_memory() {
   Node *mem = _kit->memory(_alias_idx);
   _memory = mem;
 }
 
-Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
   MemNode::MemOrd mo = access.mem_node_mo();
@@ -386,7 +446,7 @@
   return load_store;
 }
 
-Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
   DecoratorSet decorators = access.decorators();
@@ -460,7 +520,7 @@
   return load_store;
 }
 
-Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
   Node* mem = access.memory();
   Node* adr = access.addr().node();
@@ -508,7 +568,7 @@
   return load_store;
 }
 
-Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   Node* load_store = NULL;
   GraphKit* kit = access.kit();
   Node* adr = access.addr().node();
@@ -538,27 +598,27 @@
   return load_store;
 }
 
-Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
+Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
                                           Node* new_val, const Type* value_type) const {
   C2AccessFence fence(access);
   resolve_address(access);
   return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
 }
 
-Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
+Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
                                            Node* new_val, const Type* value_type) const {
   C2AccessFence fence(access);
   resolve_address(access);
   return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
 }
 
-Node* BarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* BarrierSetC2::atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   C2AccessFence fence(access);
   resolve_address(access);
   return atomic_xchg_at_resolved(access, new_val, value_type);
 }
 
-Node* BarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* BarrierSetC2::atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   C2AccessFence fence(access);
   resolve_address(access);
   return atomic_add_at_resolved(access, new_val, value_type);
@@ -594,7 +654,7 @@
 
   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
 
-  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
+  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, true, false);
   ac->set_clonebasic();
   Node* n = kit->gvn().transform(ac);
   if (n == ac) {
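
The single flipped argument in this hunk (false to true) is, reading the
ArrayCopyNode::make parameter list of this era, the alloc_tightly_coupled flag;
the argument names below are therefore an assumption to be checked against
arraycopynode.hpp:

    ArrayCopyNode* ac = ArrayCopyNode::make(kit, false /* may_throw */,
                                            src_base, NULL, dst_base, NULL, countx,
                                            true  /* alloc_tightly_coupled */,
                                            false /* has_negative_length_guard */);

For the clone-basic fast path the destination is a fresh allocation, so marking
the copy as tightly coupled with its allocation lets later phases treat the
pair as one unit.
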
@@ -731,3 +791,8 @@
   }
   return fast_oop;
 }
+
+void BarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const {
+  // no barrier
+  igvn.replace_node(ac, call);
+}
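
This new default hook gives collectors a seam at macro expansion time: the base
implementation simply replaces the ArrayCopy node with the plain runtime call.
A concrete barrier set could override it to interpose its own barrier instead;
a hypothetical sketch (MyBarrierSetC2 and make_clone_barrier are illustrative
names, not part of this change):

    void MyBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call,
                                                    PhaseIterGVN& igvn) const {
      // emit a GC-specific barrier around, or instead of, the plain clone call
      Node* barrier = make_clone_barrier(call, igvn);  // hypothetical helper
      igvn.replace_node(ac, barrier);
    }
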