src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
branch:     datagramsocketimpl-branch
changeset:  58678:9cf78a70fa4f
parent:     54327:a4d19817609c
child:      58679:9c3209ff7550
comparison: 58677:13588c901957 (old revision) vs 58678:9cf78a70fa4f (new revision)
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  */
    23 
    24 #include "precompiled.hpp"
       
old revision:
    25 #include "opto/compile.hpp"
    26 #include "opto/castnode.hpp"
    27 #include "opto/escape.hpp"
    28 #include "opto/graphKit.hpp"
    29 #include "opto/idealKit.hpp"
    30 #include "opto/loopnode.hpp"
    31 #include "opto/macro.hpp"
    32 #include "opto/node.hpp"
    33 #include "opto/type.hpp"
    34 #include "utilities/macros.hpp"
    35 #include "gc/z/zBarrierSet.hpp"
    36 #include "gc/z/c2/zBarrierSetC2.hpp"
    37 #include "gc/z/zThreadLocalData.hpp"
    38 #include "gc/z/zBarrierSetRuntime.hpp"
    39 
    40 ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena) :
    41     _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8,  0, NULL)) {}
    42 
    43 int ZBarrierSetC2State::load_barrier_count() const {
    44   return _load_barrier_nodes->length();
    45 }
    46 
    47 void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
    48   assert(!_load_barrier_nodes->contains(n), " duplicate entry in expand list");
    49   _load_barrier_nodes->append(n);
    50 }
    51 
    52 void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
    53   // this function may be called twice for a node so check
    54   // that the node is in the array before attempting to remove it
    55   if (_load_barrier_nodes->contains(n)) {
    56     _load_barrier_nodes->remove(n);
    57   }
    58 }
    59 
    60 LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
    61   return _load_barrier_nodes->at(idx);
new revision:
    25 #include "classfile/javaClasses.hpp"
    26 #include "gc/z/c2/zBarrierSetC2.hpp"
    27 #include "gc/z/zBarrierSet.hpp"
    28 #include "gc/z/zBarrierSetAssembler.hpp"
    29 #include "gc/z/zBarrierSetRuntime.hpp"
    30 #include "opto/block.hpp"
    31 #include "opto/compile.hpp"
    32 #include "opto/graphKit.hpp"
    33 #include "opto/machnode.hpp"
    34 #include "opto/memnode.hpp"
    35 #include "opto/node.hpp"
    36 #include "opto/regalloc.hpp"
    37 #include "opto/rootnode.hpp"
    38 #include "utilities/growableArray.hpp"
    39 #include "utilities/macros.hpp"
    40 
    41 class ZBarrierSetC2State : public ResourceObj {
    42 private:
    43   GrowableArray<ZLoadBarrierStubC2*>* _stubs;
    44   Node_Array                          _live;
    45 
    46 public:
    47   ZBarrierSetC2State(Arena* arena) :
    48     _stubs(new (arena) GrowableArray<ZLoadBarrierStubC2*>(arena, 8,  0, NULL)),
    49     _live(arena) {}
    50 
    51   GrowableArray<ZLoadBarrierStubC2*>* stubs() {
    52     return _stubs;
    53   }
    54 
    55   RegMask* live(const Node* node) {
    56     if (!node->is_Mach()) {
    57       // Don't need liveness for non-MachNodes
    58       return NULL;
    59     }
    60 
    61     const MachNode* const mach = node->as_Mach();
    62     if (mach->barrier_data() != ZLoadBarrierStrong &&
    63         mach->barrier_data() != ZLoadBarrierWeak) {
    64       // Don't need liveness data for nodes without barriers
    65       return NULL;
    66     }
       
    67 
       
    68     RegMask* live = (RegMask*)_live[node->_idx];
       
    69     if (live == NULL) {
       
    70       live = new (Compile::current()->comp_arena()->Amalloc_D(sizeof(RegMask))) RegMask();
       
    71       _live.map(node->_idx, (Node*)live);
       
    72     }
       
    73 
       
    74     return live;
       
    75   }
       
    76 };
       
    77 
       
    78 static ZBarrierSetC2State* barrier_set_state() {
       
    79   return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
       
    80 }
       
    81 
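// Creates a stub describing a single barriered access: the reference address,
// the register holding the loaded reference, a temp register, and whether the
// barrier is weak. The stub is allocated in the compile arena and registered
// with the per-compilation barrier state, except during the scratch-emit pass
// that is used only for size estimation; registered stubs are later emitted
// out of line by ZBarrierSetC2::emit_stubs().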
       
    82 ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
       
    83   ZLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2(node, ref_addr, ref, tmp, weak);
       
    84   if (!Compile::current()->in_scratch_emit_size()) {
       
    85     barrier_set_state()->stubs()->append(stub);
       
    86   }
       
    87 
       
    88   return stub;
       
    89 }
       
    90 
       
    91 ZLoadBarrierStubC2::ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) :
       
    92     _node(node),
       
    93     _ref_addr(ref_addr),
       
    94     _ref(ref),
       
    95     _tmp(tmp),
       
    96     _weak(weak),
       
    97     _entry(),
       
    98     _continuation() {
       
    99   assert_different_registers(ref, ref_addr.base());
       
   100   assert_different_registers(ref, ref_addr.index());
       
   101 }
       
   102 
       
   103 Address ZLoadBarrierStubC2::ref_addr() const {
       
   104   return _ref_addr;
       
   105 }
       
   106 
       
   107 Register ZLoadBarrierStubC2::ref() const {
       
   108   return _ref;
       
   109 }
       
   110 
       
   111 Register ZLoadBarrierStubC2::tmp() const {
       
   112   return _tmp;
       
   113 }
       
   114 
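// Selects the runtime entry used to heal the loaded reference: the weak or the
// strong variant of load_barrier_on_oop_field_preloaded, depending on how the
// stub was created.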
       
   115 address ZLoadBarrierStubC2::slow_path() const {
       
   116   const DecoratorSet decorators = _weak ? ON_WEAK_OOP_REF : ON_STRONG_OOP_REF;
       
   117   return ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators);
       
   118 }
       
   119 
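// Register mask of values live across this access, as recorded in the barrier
// state (presumably filled in by compute_liveness_at_stubs(), which is not part
// of the hunks shown here), so the back end can limit spilling around the
// slow-path call.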
       
   120 RegMask& ZLoadBarrierStubC2::live() const {
       
   121   return *barrier_set_state()->live(_node);
       
   122 }
       
   123 
       
   124 Label* ZLoadBarrierStubC2::entry() {
       
   125   // The _entry will never be bound when in_scratch_emit_size() is true.
       
   126   // However, we still need to return a label that is not bound now, but
       
   127   // will eventually be bound. Any label will do, as it will only act as
       
   128   // a placeholder, so we return the _continuation label.
       
   129   return Compile::current()->in_scratch_emit_size() ? &_continuation : &_entry;
       
   130 }
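// The out-of-line code for each registered stub is produced by the platform's
// ZBarrierSetAssembler in emit_stubs() below. As an illustration only, a
// back-end emitter for these stubs might look roughly like the sketch below
// (assuming an x86-style MacroAssembler with __ expanding to masm->; saving of
// the registers in stub->live() and the argument setup for the runtime call
// are elided, and the exact code is an assumption, not part of this change):
//
//   void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm,
//                                                            ZLoadBarrierStubC2* stub) const {
//     __ bind(*stub->entry());                    // slow-path entry, branched to from the inline test
//     // save the registers reported by stub->live(), set up arguments from
//     // stub->ref() and stub->ref_addr()
//     __ call(RuntimeAddress(stub->slow_path())); // heal the loaded reference
//     // restore saved registers
//     __ jmp(*stub->continuation());              // resume the inline fast path
//   }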
       
   131 
       
   132 Label* ZLoadBarrierStubC2::continuation() {
       
   133   return &_continuation;
new revision:
   134 }
   135 
   136 void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
   137   return new (comp_arena) ZBarrierSetC2State(comp_arena);
   138 }
   139 
   140 void ZBarrierSetC2::late_barrier_analysis() const {
   141   analyze_dominating_barriers();
   142   compute_liveness_at_stubs();
   143 }
   144 
   145 void ZBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
   146   MacroAssembler masm(&cb);
   147   GrowableArray<ZLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
   148 
   149   for (int i = 0; i < stubs->length(); i++) {
   150     // Make sure there is enough space in the code buffer
   151     if (cb.insts()->maybe_expand_to_ensure_remaining(Compile::MAX_inst_size) && cb.blob() == NULL) {
   152       ciEnv::current()->record_failure("CodeCache is full");
   153       return;
   154     }
   155 
   156     ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
   157   }
   158 
   159   masm.flush();
   160 }
   161 
   162 int ZBarrierSetC2::estimate_stub_size() const {
   163   Compile* const C = Compile::current();
   164   BufferBlob* const blob = C->scratch_buffer_blob();
   165   GrowableArray<ZLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
   166   int size = 0;
   167 
   168   for (int i = 0; i < stubs->length(); i++) {
   169     CodeBuffer cb(blob->content_begin(), (address)C->scratch_locs_memory() - blob->content_begin());
   170     MacroAssembler masm(&cb);
   171     ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
   172     size += cb.insts_size();
   173   }
   174 
   175   return size;
old revision:
    62 }
    63 
    64 void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
    65   return new(comp_arena) ZBarrierSetC2State(comp_arena);
    66 }
    67 
    68 ZBarrierSetC2State* ZBarrierSetC2::state() const {
    69   return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
    70 }
    71 
    72 bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
    73   // 1. This step follows potential oop projections of a load barrier before expansion
    74   if (node->is_Proj()) {
    75     node = node->in(0);
    76   }
    77 
    78   // 2. This step checks for unexpanded load barriers
    79   if (node->is_LoadBarrier()) {
    80     return true;
    81   }
    82 
    83   // 3. This step checks for the phi corresponding to an optimized load barrier expansion
    84   if (node->is_Phi()) {
    85     PhiNode* phi = node->as_Phi();
    86     Node* n = phi->in(1);
    87     if (n != NULL && (n->is_LoadBarrierSlowReg() ||  n->is_LoadBarrierWeakSlowReg())) {
    88       return true;
    89     }
    90   }
    91 
    92   return false;
    93 }
    94 
    95 void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
    96   if (node->is_LoadBarrier()) {
    97     state()->add_load_barrier_node(node->as_LoadBarrier());
    98   }
    99 }
   100 
   101 void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
   102   if (node->is_LoadBarrier()) {
   103     state()->remove_load_barrier_node(node->as_LoadBarrier());
   104   }
       
   105 }
       
   106 
       
   107 void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
       
   108   // Remove useless LoadBarrier nodes
       
   109   ZBarrierSetC2State* s = state();
       
   110   for (int i = s->load_barrier_count()-1; i >= 0; i--) {
       
   111     LoadBarrierNode* n = s->load_barrier_node(i);
       
   112     if (!useful.member(n)) {
       
   113       unregister_potential_barrier_node(n);
       
   114     }
       
   115   }
       
   116 }
       
   117 
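// A barrier whose Oop projection is used only as the Similar edge of other
// barriers is not really needed; pushing it back on the IGVN worklist lets
// LoadBarrierNode::Ideal() remove it (see has_true_uses() further down).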
       
   118 void ZBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
       
   119   if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
       
   120     igvn->_worklist.push(node);
       
   121   }
       
   122 }
       
   123 
       
   124 void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
       
   125   // Look for dominating barriers on the same address only once all
       
   126   // other loop opts are over. Loop opts may cause a safepoint to be
       
   127   // inserted between a barrier and its dominating barrier.
       
   128   Compile* C = Compile::current();
       
   129   ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
       
   130   ZBarrierSetC2State* s = bs->state();
       
   131   if (s->load_barrier_count() >= 2) {
       
   132     Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
       
   133     PhaseIdealLoop::optimize(igvn, LoopOptsLastRound);
       
   134     if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
       
   135   }
       
   136 }
       
   137 
       
   138 void ZBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {
       
   139   // Permanent temporary workaround
       
   140   // Loadbarriers may have non-obvious dead uses keeping them alive during parsing. The use is
       
   141   // removed by RemoveUseless (after parsing, before optimize) but the barriers won't be added to
       
   142   // the worklist. Unless we add them explicitly they are not guaranteed to end up there.
       
   143   ZBarrierSetC2State* s = state();
       
   144 
       
   145   for (int i = 0; i < s->load_barrier_count(); i++) {
       
   146     LoadBarrierNode* n = s->load_barrier_node(i);
       
   147     worklist->push(n);
       
   148   }
       
   149 }
       
   150 
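// Call signature for the out-of-line load barrier runtime call: it takes the
// loaded oop and the address it was loaded from, and returns the healed oop
// (the address may be null when no write-back is wanted, see the expansion
// code below).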
       
   151 const TypeFunc* ZBarrierSetC2::load_barrier_Type() const {
       
   152   const Type** fields;
       
   153 
       
   154   // Create input types (domain)
       
   155   fields = TypeTuple::fields(2);
       
   156   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
       
   157   fields[TypeFunc::Parms+1] = TypeOopPtr::BOTTOM;
       
   158   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
       
   159 
       
   160   // Create result type (range)
       
   161   fields = TypeTuple::fields(1);
       
   162   fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
       
   163   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
       
   164 
       
   165   return TypeFunc::make(domain, range);
       
   166 }
       
   167 
       
   168 // == LoadBarrierNode ==
       
   169 
       
   170 LoadBarrierNode::LoadBarrierNode(Compile* C,
       
   171                                  Node* c,
       
   172                                  Node* mem,
       
   173                                  Node* val,
       
   174                                  Node* adr,
       
   175                                  bool weak,
       
   176                                  bool writeback,
       
   177                                  bool oop_reload_allowed) :
       
   178     MultiNode(Number_of_Inputs),
       
   179     _weak(weak),
       
   180     _writeback(writeback),
       
   181     _oop_reload_allowed(oop_reload_allowed) {
       
   182   init_req(Control, c);
       
   183   init_req(Memory, mem);
       
   184   init_req(Oop, val);
       
   185   init_req(Address, adr);
       
   186   init_req(Similar, C->top());
       
   187 
       
   188   init_class_id(Class_LoadBarrier);
       
   189   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
       
   190   bs->register_potential_barrier_node(this);
       
   191 }
       
   192 
       
   193 uint LoadBarrierNode::size_of() const {
       
   194   return sizeof(*this);
       
   195 }
       
   196 
       
   197 bool LoadBarrierNode::cmp(const Node& n) const {
       
   198   ShouldNotReachHere();
       
   199   return false;
       
   200 }
       
   201 
       
   202 const Type *LoadBarrierNode::bottom_type() const {
       
   203   const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
       
   204   Node* in_oop = in(Oop);
       
   205   floadbarrier[Control] = Type::CONTROL;
       
   206   floadbarrier[Memory] = Type::MEMORY;
       
   207   floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
       
   208   return TypeTuple::make(Number_of_Outputs, floadbarrier);
       
   209 }
       
   210 
       
   211 const TypePtr* LoadBarrierNode::adr_type() const {
       
   212   ShouldNotReachHere();
       
   213   return NULL;
       
   214 }
       
   215 
       
   216 const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
       
   217   const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
       
   218   const Type* val_t = phase->type(in(Oop));
       
   219   floadbarrier[Control] = Type::CONTROL;
       
   220   floadbarrier[Memory] = Type::MEMORY;
       
   221   floadbarrier[Oop] = val_t;
       
   222   return TypeTuple::make(Number_of_Outputs, floadbarrier);
       
   223 }
       
   224 
       
   225 bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
       
   226   if (phase != NULL) {
       
   227     return phase->is_dominator(d, n);
       
   228   }
       
   229 
       
   230   for (int i = 0; i < 10 && n != NULL; i++) {
       
   231     n = IfNode::up_one_dom(n, linear_only);
       
   232     if (n == d) {
       
   233       return true;
       
   234     }
       
   235   }
       
   236 
       
   237   return false;
       
   238 }
       
   239 
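// The Similar edge, when not top, points to the Oop projection of another
// barrier on the same address that was found to dominate this one; it is used
// here and in Ideal() to elide the dominated barrier.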
       
   240 LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
       
   241   Node* val = in(LoadBarrierNode::Oop);
       
   242   if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
       
   243     LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
       
   244     assert(lb->in(Address) == in(Address), "");
       
   245     // Load barrier on Similar edge dominates so if it now has the Oop field it can replace this barrier.
       
   246     if (lb->in(Oop) == in(Oop)) {
       
   247       return lb;
       
   248     }
       
   249     // Follow chain of load barrier through Similar edges
       
   250     while (!lb->in(Similar)->is_top()) {
       
   251       lb = lb->in(Similar)->in(0)->as_LoadBarrier();
       
   252       assert(lb->in(Address) == in(Address), "");
       
   253     }
       
   254     if (lb != in(Similar)->in(0)) {
       
   255       return lb;
       
   256     }
       
   257   }
       
   258   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
       
   259     Node* u = val->fast_out(i);
       
   260     if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
       
   261       Node* this_ctrl = in(LoadBarrierNode::Control);
       
   262       Node* other_ctrl = u->in(LoadBarrierNode::Control);
       
   263       if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
       
   264         return u->as_LoadBarrier();
       
   265       }
       
   266     }
       
   267   }
       
   268 
       
   269   if (ZVerifyLoadBarriers || can_be_eliminated()) {
       
   270     return NULL;
       
   271   }
       
   272 
       
   273   if (!look_for_similar) {
       
   274     return NULL;
       
   275   }
       
   276 
       
   277   Node* addr = in(LoadBarrierNode::Address);
       
   278   for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
       
   279     Node* u = addr->fast_out(i);
       
   280     if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
       
   281       Node* this_ctrl = in(LoadBarrierNode::Control);
       
   282       Node* other_ctrl = u->in(LoadBarrierNode::Control);
       
   283       if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
       
   284         ResourceMark rm;
       
   285         Unique_Node_List wq;
       
   286         wq.push(in(LoadBarrierNode::Control));
       
   287         bool ok = true;
       
   288         bool dom_found = false;
       
   289         for (uint next = 0; next < wq.size(); ++next) {
       
   290           Node *n = wq.at(next);
       
   291           if (n->is_top()) {
       
   292             return NULL;
       
   293           }
       
   294           assert(n->is_CFG(), "");
       
   295           if (n->is_SafePoint()) {
       
   296             ok = false;
       
   297             break;
       
   298           }
       
   299           if (n == u) {
       
   300             dom_found = true;
       
   301             continue;
       
   302           }
       
   303           if (n->is_Region()) {
       
   304             for (uint i = 1; i < n->req(); i++) {
       
   305               Node* m = n->in(i);
       
   306               if (m != NULL) {
       
   307                 wq.push(m);
       
   308               }
       
   309             }
       
   310           } else {
       
   311             Node* m = n->in(0);
       
   312             if (m != NULL) {
       
   313               wq.push(m);
       
   314             }
       
   315           }
       
   316         }
       
   317         if (ok) {
       
   318           assert(dom_found, "");
       
   319           return u->as_LoadBarrier();
       
   320         }
       
   321         break;
       
   322       }
       
   323     }
       
   324   }
       
   325 
       
   326   return NULL;
       
   327 }
       
   328 
       
   329 void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
       
   330   // A change to this barrier may affect dominated barriers, so re-push those
       
   331   Node* val = in(LoadBarrierNode::Oop);
       
   332 
       
   333   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
       
   334     Node* u = val->fast_out(i);
       
   335     if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
       
   336       Node* this_ctrl = in(Control);
       
   337       Node* other_ctrl = u->in(Control);
       
   338       if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
       
   339         igvn->_worklist.push(u);
       
   340       }
       
   341     }
       
   342 
       
   343     Node* addr = in(LoadBarrierNode::Address);
       
   344     for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
       
   345       Node* u = addr->fast_out(i);
       
   346       if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
       
   347         Node* this_ctrl = in(Control);
       
   348         Node* other_ctrl = u->in(Control);
       
   349         if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
       
   350           igvn->_worklist.push(u);
       
   351         }
       
   352       }
       
   353     }
       
   354   }
       
   355 }
       
   356 
       
   357 Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
       
   358   if (!phase->C->directive()->ZOptimizeLoadBarriersOption) {
       
   359     return this;
       
   360   }
       
   361 
       
   362   bool redundant_addr = false;
       
   363   LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
       
   364   if (dominating_barrier != NULL) {
       
   365     assert(dominating_barrier->in(Oop) == in(Oop), "");
       
   366     return dominating_barrier;
       
   367   }
       
   368 
       
   369   return this;
       
   370 }
       
   371 
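// Ideal() folds the barrier away when it can: when the Oop input is not a
// Phi, LoadP, GetAndSetP or DecodeN (so there is nothing to heal), when a
// dominating barrier on the same address already covers it, or when the
// barrier has no true uses. Otherwise it only canonicalizes the Memory and
// Similar edges.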
       
   372 Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
       
   373   if (remove_dead_region(phase, can_reshape)) {
       
   374     return this;
       
   375   }
       
   376 
       
   377   Node* val = in(Oop);
       
   378   Node* mem = in(Memory);
       
   379   Node* ctrl = in(Control);
       
   380   Node* adr = in(Address);
       
   381   assert(val->Opcode() != Op_LoadN, "");
       
   382 
       
   383   if (mem->is_MergeMem()) {
       
   384     Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
       
   385     set_req(Memory, new_mem);
       
   386     if (mem->outcnt() == 0 && can_reshape) {
       
   387       phase->is_IterGVN()->_worklist.push(mem);
       
   388     }
       
   389 
       
   390     return this;
       
   391   }
       
   392 
       
   393   bool optimizeLoadBarriers = phase->C->directive()->ZOptimizeLoadBarriersOption;
       
   394   LoadBarrierNode* dominating_barrier = optimizeLoadBarriers ? has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()) : NULL;
       
   395   if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
       
   396     assert(in(Address) == dominating_barrier->in(Address), "");
       
   397     set_req(Similar, dominating_barrier->proj_out(Oop));
       
   398     return this;
       
   399   }
       
   400 
       
   401   bool eliminate = (optimizeLoadBarriers && !(val->is_Phi() || val->Opcode() == Op_LoadP || val->Opcode() == Op_GetAndSetP || val->is_DecodeN())) ||
       
   402                    (can_reshape && (dominating_barrier != NULL || !has_true_uses()));
       
   403 
       
   404   if (eliminate) {
       
   405     if (can_reshape) {
       
   406       PhaseIterGVN* igvn = phase->is_IterGVN();
       
   407       Node* out_ctrl = proj_out_or_null(Control);
       
   408       Node* out_res = proj_out_or_null(Oop);
       
   409 
       
   410       if (out_ctrl != NULL) {
       
   411         igvn->replace_node(out_ctrl, ctrl);
       
   412       }
       
   413 
       
   414       // That transformation may cause the Similar edge on the load barrier to be invalid
       
   415       fix_similar_in_uses(igvn);
       
   416       if (out_res != NULL) {
       
   417         if (dominating_barrier != NULL) {
       
   418           igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
       
   419         } else {
       
   420           igvn->replace_node(out_res, val);
       
   421         }
       
   422       }
       
   423     }
       
   424 
       
   425     return new ConINode(TypeInt::ZERO);
       
   426   }
       
   427 
       
   428   // If the Similar edge is no longer a load barrier, clear it
       
   429   Node* similar = in(Similar);
       
   430   if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
       
   431     set_req(Similar, phase->C->top());
       
   432     return this;
       
   433   }
       
   434 
       
   435   if (can_reshape) {
       
   436     // If this barrier is linked through the Similar edge by a
       
   437     // dominated barrier and both barriers have the same Oop field,
       
   438     // the dominated barrier can go away, so push it for reprocessing.
       
   439     // We also want to avoid a barrier to depend on another dominating
       
   440     // barrier through its Similar edge that itself depend on another
       
   441     // barrier through its Similar edge and rather have the first
       
   442     // depend on the third.
       
   443     PhaseIterGVN* igvn = phase->is_IterGVN();
       
   444     Node* out_res = proj_out(Oop);
       
   445     for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
       
   446       Node* u = out_res->fast_out(i);
       
   447       if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
       
   448           (u->in(Oop) == val || !u->in(Similar)->is_top())) {
       
   449         igvn->_worklist.push(u);
       
   450       }
       
   451     }
       
   452 
       
   453     push_dominated_barriers(igvn);
       
   454   }
       
   455 
       
   456   return NULL;
       
   457 }
       
   458 
       
   459 uint LoadBarrierNode::match_edge(uint idx) const {
       
   460   ShouldNotReachHere();
       
   461   return 0;
       
   462 }
       
   463 
       
   464 void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
       
   465   Node* out_res = proj_out_or_null(Oop);
       
   466   if (out_res == NULL) {
       
   467     return;
       
   468   }
       
   469 
       
   470   for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
       
   471     Node* u = out_res->fast_out(i);
       
   472     if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
       
   473       igvn->replace_input_of(u, Similar, igvn->C->top());
       
   474       --i;
       
   475       --imax;
       
   476     }
       
   477   }
       
   478 }
       
   479 
       
   480 bool LoadBarrierNode::has_true_uses() const {
       
   481   Node* out_res = proj_out_or_null(Oop);
       
   482   if (out_res == NULL) {
       
   483     return false;
       
   484   }
       
   485 
       
   486   for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
       
   487     Node* u = out_res->fast_out(i);
       
   488     if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
       
   489       return true;
       
   490     }
       
   491   }
       
   492 
       
   493   return false;
       
   494 }
       
   495 
       
   496 // == Accesses ==
       
   497 
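// Expands a pointer CAS roughly as follows: try the CAS; on failure, reload
// the field and heal it through a LoadBarrierNode; if the healed value still
// equals the expected value, the failure was only due to a stale (unhealed)
// reference, so the CAS is retried once; the results of both attempts are then
// merged.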
       
   498 Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicParseAccess& access) const {
       
   499   assert(!UseCompressedOops, "Not allowed");
       
   500   CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
       
   501   PhaseGVN& gvn = access.gvn();
       
   502   Compile* C = Compile::current();
       
   503   GraphKit* kit = access.kit();
       
   504 
       
   505   Node* in_ctrl     = cas->in(MemNode::Control);
       
   506   Node* in_mem      = cas->in(MemNode::Memory);
       
   507   Node* in_adr      = cas->in(MemNode::Address);
       
   508   Node* in_val      = cas->in(MemNode::ValueIn);
       
   509   Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn);
       
   510 
       
   511   float likely                   = PROB_LIKELY(0.999);
       
   512 
       
   513   const TypePtr *adr_type        = gvn.type(in_adr)->isa_ptr();
       
   514   Compile::AliasType* alias_type = C->alias_type(adr_type);
       
   515   int alias_idx                  = C->get_alias_index(adr_type);
       
   516 
       
   517   // Outer check - true: continue, false: load and check
       
   518   Node* region   = new RegionNode(3);
       
   519   Node* phi      = new PhiNode(region, TypeInt::BOOL);
       
   520   Node* phi_mem  = new PhiNode(region, Type::MEMORY, adr_type);
       
   521 
       
   522   // Inner check - is the healed ref equal to the expected value
       
   523   Node* region2  = new RegionNode(3);
       
   524   Node* phi2     = new PhiNode(region2, TypeInt::BOOL);
       
   525   Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type);
       
   526 
       
   527   // CAS node returns 0 or 1
       
   528   Node* cmp     = gvn.transform(new CmpINode(cas, kit->intcon(0)));
       
   529   Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
       
   530   IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
       
   531   Node* then    = gvn.transform(new IfTrueNode(iff));
       
   532   Node* elsen   = gvn.transform(new IfFalseNode(iff));
       
   533 
       
   534   Node* scmemproj1   = gvn.transform(new SCMemProjNode(cas));
       
   535 
       
   536   kit->set_memory(scmemproj1, alias_idx);
       
   537   phi_mem->init_req(1, scmemproj1);
       
   538   phi_mem2->init_req(2, scmemproj1);
       
   539 
       
   540   // CAS fail - reload and heal oop
       
   541   Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
       
   542   Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
       
   543   Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
       
   544   Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
       
   545 
       
   546   // Check load
       
   547   Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
       
   548   Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
       
   549   Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
       
   550   Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
       
   551   IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
       
   552   Node* then2   = gvn.transform(new IfTrueNode(iff2));
       
   553   Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
       
   554 
       
   555   // redo CAS
       
   556   Node* cas2       = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order()));
       
   557   Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2));
       
   558   kit->set_control(elsen2);
       
   559   kit->set_memory(scmemproj2, alias_idx);
       
   560 
       
   561   // Merge inner flow - check if healed oop was equal to the expected value.
       
   562   region2->set_req(1, kit->control());
       
   563   region2->set_req(2, then2);
       
   564   phi2->set_req(1, cas2);
       
   565   phi2->set_req(2, kit->intcon(0));
       
   566   phi_mem2->init_req(1, scmemproj2);
       
   567   kit->set_memory(phi_mem2, alias_idx);
       
   568 
       
   569   // Merge outer flow - then check if first CAS succeeded
       
   570   region->set_req(1, then);
       
   571   region->set_req(2, region2);
       
   572   phi->set_req(1, kit->intcon(1));
       
   573   phi->set_req(2, phi2);
       
   574   phi_mem->init_req(2, phi_mem2);
       
   575   kit->set_memory(phi_mem, alias_idx);
       
   576 
       
   577   gvn.transform(region2);
       
   578   gvn.transform(phi2);
       
   579   gvn.transform(phi_mem2);
       
   580   gvn.transform(region);
       
   581   gvn.transform(phi);
       
   582   gvn.transform(phi_mem);
       
   583 
       
   584   kit->set_control(region);
       
   585   kit->insert_mem_bar(Op_MemBarCPUOrder);
       
   586 
       
   587   return phi;
       
   588 }
       
   589 
       
   590 Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicParseAccess& access) const {
       
   591   CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
       
   592   GraphKit* kit = access.kit();
       
   593   PhaseGVN& gvn = kit->gvn();
       
   594   Compile* C = Compile::current();
       
   595 
       
   596   Node* in_ctrl     = cmpx->in(MemNode::Control);
       
   597   Node* in_mem      = cmpx->in(MemNode::Memory);
       
   598   Node* in_adr      = cmpx->in(MemNode::Address);
       
   599   Node* in_val      = cmpx->in(MemNode::ValueIn);
       
   600   Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn);
       
   601 
       
   602   float likely                   = PROB_LIKELY(0.999);
       
   603 
       
   604   const TypePtr *adr_type        = cmpx->get_ptr_type();
       
   605   Compile::AliasType* alias_type = C->alias_type(adr_type);
       
   606   int alias_idx                  = C->get_alias_index(adr_type);
       
   607 
       
   608   // Outer check - true: continue, false: load and check
       
   609   Node* region  = new RegionNode(3);
       
   610   Node* phi     = new PhiNode(region, adr_type);
       
   611 
       
   612   // Inner check - is the healed ref equal to the expected value
       
   613   Node* region2 = new RegionNode(3);
       
   614   Node* phi2    = new PhiNode(region2, adr_type);
       
   615 
       
   616   // Check if cmpx succeeded
       
   617   Node* cmp     = gvn.transform(new CmpPNode(cmpx, in_expected));
       
   618   Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
       
   619   IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
       
   620   Node* then    = gvn.transform(new IfTrueNode(iff));
       
   621   Node* elsen   = gvn.transform(new IfFalseNode(iff));
       
   622 
       
   623   Node* scmemproj1  = gvn.transform(new SCMemProjNode(cmpx));
       
   624   kit->set_memory(scmemproj1, alias_idx);
       
   625 
       
   626   // CAS fail - reload and heal oop
       
   627   Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
       
   628   Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
       
   629   Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
       
   630   Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
       
   631 
       
   632   // Check load
       
   633   Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
       
   634   Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
       
   635   Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
       
   636   Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
       
   637   IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
       
   638   Node* then2   = gvn.transform(new IfTrueNode(iff2));
       
   639   Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
       
   640 
       
   641   // Redo CAS
       
   642   Node* cmpx2      = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order()));
       
   643   Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2));
       
   644   kit->set_control(elsen2);
       
   645   kit->set_memory(scmemproj2, alias_idx);
       
   646 
       
   647   // Merge inner flow - check if healed oop was equal to the expected value.
       
   648   region2->set_req(1, kit->control());
       
   649   region2->set_req(2, then2);
       
   650   phi2->set_req(1, cmpx2);
       
   651   phi2->set_req(2, barrierdata);
       
   652 
       
   653   // Merge outer flow - then check if first cas succeeded
       
   654   region->set_req(1, then);
       
   655   region->set_req(2, region2);
       
   656   phi->set_req(1, cmpx);
       
   657   phi->set_req(2, phi2);
       
   658 
       
   659   gvn.transform(region2);
       
   660   gvn.transform(phi2);
       
   661   gvn.transform(region);
       
   662   gvn.transform(phi);
       
   663 
       
   664   kit->set_control(region);
       
   665   kit->set_memory(in_mem, alias_idx);
       
   666   kit->insert_mem_bar(Op_MemBarCPUOrder);
       
   667 
       
   668   return phi;
       
   669 }
       
   670 
       
   671 Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const {
       
   672   PhaseGVN& gvn = kit->gvn();
       
   673   Node* barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed);
       
   674   Node* transformed_barrier = gvn.transform(barrier);
       
   675 
       
   676   if (transformed_barrier->is_LoadBarrier()) {
       
   677     if (barrier == transformed_barrier) {
       
   678       kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
       
   679     }
       
   680     Node* result = gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
       
   681     return result;
       
   682   } else {
       
   683     return val;
       
   684   }
       
new revision:
   176 }
   177 
   178 static bool barrier_needed(C2Access& access) {
   179   return ZBarrierSet::barrier_needed(access.decorators(), access.type());
   180 }
   181 
   182 Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
   183   Node* result = BarrierSetC2::load_at_resolved(access, val_type);
   184   if (barrier_needed(access) && access.raw_access()->is_Mem()) {
   185     if ((access.decorators() & ON_WEAK_OOP_REF) != 0) {
   186       access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierWeak);
   187     } else {
   188       access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierStrong);
   189     }
   190   }
   191 
   192   return result;
   193 }
   194 
   195 Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
   196                                                     Node* new_val, const Type* val_type) const {
   197   Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
   198   if (barrier_needed(access)) {
   199     access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
   200   }
   201   return result;
   202 }
   203 
   204 Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
   205                                                      Node* new_val, const Type* value_type) const {
   206   Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
   207   if (barrier_needed(access)) {
   208     access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
   209   }
   210   return result;
   211 }
   212 
   213 Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
   214   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
   215   if (barrier_needed(access)) {
   216     access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
   217   }
   218   return result;
   219 }
   220 
   221 bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type,
   222                                                     bool is_clone, ArrayCopyPhase phase) const {
   223   return type == T_OBJECT || type == T_ARRAY;
   224 }
   225 
   226 // == Dominating barrier elision ==
   227 
   228 static bool block_has_safepoint(const Block* block, uint from, uint to) {
   229   for (uint i = from; i < to; i++) {
   230     if (block->get_node(i)->is_MachSafePoint()) {
   231       // Safepoint found
   232       return true;
   233     }
   234   }
   235 
   236   // Safepoint not found
   237   return false;
   238 }
   239 
   240 static bool block_has_safepoint(const Block* block) {
   241   return block_has_safepoint(block, 0, block->number_of_nodes());
   242 }
   243 
   244 static uint block_index(const Block* block, const Node* node) {
   245   for (uint j = 0; j < block->number_of_nodes(); ++j) {
   246     if (block->get_node(j) == node) {
   247       return j;
   248     }
   249   }
   250   ShouldNotReachHere();
   251   return 0;
   252 }
   253 
   254 void ZBarrierSetC2::analyze_dominating_barriers() const {
   255   ResourceMark rm;
   256   Compile* const C = Compile::current();
   257   PhaseCFG* const cfg = C->cfg();
   258   Block_List worklist;
   259   Node_List mem_ops;
   260   Node_List barrier_loads;
   261 
   262   // Step 1 - Find accesses, and track them in lists
   263   for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
   264     const Block* const block = cfg->get_block(i);
   265     for (uint j = 0; j < block->number_of_nodes(); ++j) {
   266       const Node* const node = block->get_node(j);
   267       if (!node->is_Mach()) {
old revision:
   685 }
   686 
   687 static bool barrier_needed(C2Access& access) {
   688   return ZBarrierSet::barrier_needed(access.decorators(), access.type());
   689 }
   690 
   691 Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
   692   Node* p = BarrierSetC2::load_at_resolved(access, val_type);
   693   if (!barrier_needed(access)) {
   694     return p;
   695   }
   696 
   697   bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
   698 
   699   assert(access.is_parse_access(), "entry not supported at optimization time");
   700   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
   701   GraphKit* kit = parse_access.kit();
   702   PhaseGVN& gvn = kit->gvn();
   703   Node* adr = access.addr().node();
   704   Node* heap_base_oop = access.base();
   705   bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
   706   if (unsafe) {
   707     if (!ZVerifyLoadBarriers) {
   708       p = load_barrier(kit, p, adr);
   709     } else {
   710       if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
   711         p = load_barrier(kit, p, adr);
   712       } else {
   713         IdealKit ideal(kit);
   714         IdealVariable res(ideal);
   715 #define __ ideal.
   716         __ declarations_done();
   717         __ set(res, p);
   718         __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
   719           kit->sync_kit(ideal);
   720           p = load_barrier(kit, p, adr);
   721           __ set(res, p);
   722           __ sync_kit(kit);
   723         } __ end_if();
   724         kit->final_sync(ideal);
   725         p = __ value(res);
   726 #undef __
   727       }
   728     }
   729     return p;
   730   } else {
   731     return load_barrier(parse_access.kit(), p, access.addr().node(), weak, true, true);
   732   }
   733 }
   734 
   735 Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
   736                                                     Node* new_val, const Type* val_type) const {
   737   Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
   738   if (!barrier_needed(access)) {
   739     return result;
   740   }
   741 
   742   access.set_needs_pinning(false);
   743   return make_cmpx_loadbarrier(access);
   744 }
   745 
   746 Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
   747                                                      Node* new_val, const Type* value_type) const {
   748   Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
   749   if (!barrier_needed(access)) {
   750     return result;
   751   }
   752 
   753   Node* load_store = access.raw_access();
   754   bool weak_cas = (access.decorators() & C2_WEAK_CMPXCHG) != 0;
   755   bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);
   756 
   757   if (!expected_is_null) {
   758     if (weak_cas) {
   759       access.set_needs_pinning(false);
   760       load_store = make_cas_loadbarrier(access);
   761     } else {
   762       access.set_needs_pinning(false);
   763       load_store = make_cas_loadbarrier(access);
   764     }
   765   }
   766 
   767   return load_store;
   768 }
   769 
   770 Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
   771   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
   772   if (!barrier_needed(access)) {
   773     return result;
   774   }
   775 
   776   Node* load_store = access.raw_access();
   777   Node* adr = access.addr().node();
   778 
   779   assert(access.is_parse_access(), "entry not supported at optimization time");
   780   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
   781   return load_barrier(parse_access.kit(), load_store, adr, false, false, false);
   782 }
   783 
   784 // == Macro Expansion ==
   785 
   786 void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
   787   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
   788   Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
   789   Node* in_val  = barrier->in(LoadBarrierNode::Oop);
   790   Node* in_adr  = barrier->in(LoadBarrierNode::Address);
   791 
   792   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
   793   Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
   794 
   795   PhaseIterGVN &igvn = phase->igvn();
   796 
   797   if (ZVerifyLoadBarriers) {
   798     igvn.replace_node(out_res, in_val);
   799     igvn.replace_node(out_ctrl, in_ctrl);
   800     return;
   801   }
   802 
   803   if (barrier->can_be_eliminated()) {
   804     // Clone and pin the load for this barrier below the dominating
   805     // barrier: the load cannot be allowed to float above the
   806     // dominating barrier
   807     Node* load = in_val;
   808 
   809     if (load->is_Load()) {
   810       Node* new_load = load->clone();
   811       Node* addp = new_load->in(MemNode::Address);
   812       assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address");
   813       Node* cast = new CastPPNode(addp, igvn.type(addp), true);
   814       Node* ctrl = NULL;
   815       Node* similar = barrier->in(LoadBarrierNode::Similar);
   816       if (similar->is_Phi()) {
   817         // already expanded
   818         ctrl = similar->in(0);
   819       } else {
   820         assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape");
   821         ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control);
   822       }
   823       assert(ctrl != NULL, "bad control");
   824       cast->set_req(0, ctrl);
   825       igvn.transform(cast);
       
   826       new_load->set_req(MemNode::Address, cast);
       
   827       igvn.transform(new_load);
       
   828 
       
   829       igvn.replace_node(out_res, new_load);
       
   830       igvn.replace_node(out_ctrl, in_ctrl);
       
   831       return;
       
   832     }
       
   833     // cannot eliminate
       
   834   }
       
   835 
       
   836   // There are two cases that require the basic loadbarrier
       
   837   // 1) When the writeback of a healed oop must be avoided (swap)
       
   838   // 2) When we must guarantee that no reload of the oop is done (swap, cas, cmpx)
       
   839   if (!barrier->is_writeback()) {
       
   840     assert(!barrier->oop_reload_allowed(), "writeback barriers should be marked as requires oop");
       
   841   }
       
   842 
       
   843   if (!barrier->oop_reload_allowed()) {
       
   844     expand_loadbarrier_basic(phase, barrier);
       
   845   } else {
       
   846     expand_loadbarrier_optimized(phase, barrier);
       
   847   }
       
   848 }
       
   849 
       
   850 // Basic loadbarrier using conventional argument passing
       
   851 void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
       
   852   PhaseIterGVN &igvn = phase->igvn();
       
   853 
       
   854   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
       
   855   Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
       
   856   Node* in_val  = barrier->in(LoadBarrierNode::Oop);
       
   857   Node* in_adr  = barrier->in(LoadBarrierNode::Address);
       
   858 
       
   859   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
       
   860   Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
       
   861 
       
   862   float unlikely  = PROB_UNLIKELY(0.999);
       
   863   const Type* in_val_maybe_null_t = igvn.type(in_val);
       
   864 
       
   865   Node* jthread = igvn.transform(new ThreadLocalNode());
       
   866   Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
       
   867   Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
       
   868   Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
       
   869   Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
       
   870   Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
       
   871   Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
       
   872   IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
       
   873   Node* then = igvn.transform(new IfTrueNode(iff));
       
   874   Node* elsen = igvn.transform(new IfFalseNode(iff));
       
   875 
       
   876   Node* result_region;
       
   877   Node* result_val;
       
   878 
       
   879   result_region = new RegionNode(3);
       
   880   result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM);
       
   881 
       
   882   result_region->set_req(1, elsen);
       
   883   Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t));
       
   884   res->init_req(0, elsen);
       
   885   result_val->set_req(1, res);
       
   886 
       
   887   const TypeFunc *tf = load_barrier_Type();
       
   888   Node* call;
       
   889   if (barrier->is_weak()) {
       
   890     call = new CallLeafNode(tf,
       
   891                             ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(),
       
   892                             "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded",
       
   893                             TypeRawPtr::BOTTOM);
       
   894   } else {
       
   895     call = new CallLeafNode(tf,
       
   896                             ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(),
       
   897                             "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded",
       
   898                             TypeRawPtr::BOTTOM);
       
   899   }
       
   900 
       
   901   call->init_req(TypeFunc::Control, then);
       
   902   call->init_req(TypeFunc::I_O    , phase->top());
       
   903   call->init_req(TypeFunc::Memory , in_mem);
       
   904   call->init_req(TypeFunc::FramePtr, phase->top());
       
   905   call->init_req(TypeFunc::ReturnAdr, phase->top());
       
   906   call->init_req(TypeFunc::Parms+0, in_val);
       
   907   if (barrier->is_writeback()) {
       
   908     call->init_req(TypeFunc::Parms+1, in_adr);
       
   909   } else {
       
   910     // When slow path is called with a null address, the healed oop will not be written back
       
   911     call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
       
   912   }
       
   913   call = igvn.transform(call);
       
   914 
       
   915   Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control));
       
   916   res = igvn.transform(new ProjNode(call, TypeFunc::Parms));
       
   917   res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t));
       
   918 
       
   919   result_region->set_req(2, ctrl);
       
   920   result_val->set_req(2, res);
       
   921 
       
   922   result_region = igvn.transform(result_region);
       
   923   result_val = igvn.transform(result_val);
       
   924 
       
   925   if (out_ctrl != NULL) { // Added if cond
       
   926     igvn.replace_node(out_ctrl, result_region);
       
   927   }
       
   928   igvn.replace_node(out_res, result_val);
       
   929 }
       
   930 
       
   931 // Optimized, low spill, loadbarrier variant using stub specialized on register used
       
   932 void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
       
   933   PhaseIterGVN &igvn = phase->igvn();
       
   934 #ifdef PRINT_NODE_TRAVERSALS
       
   935   Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop);
       
   936 #endif
       
   937 
       
   938   Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
       
   939   Node* in_mem = barrier->in(LoadBarrierNode::Memory);
       
   940   Node* in_val = barrier->in(LoadBarrierNode::Oop);
       
   941   Node* in_adr = barrier->in(LoadBarrierNode::Address);
       
   942 
       
   943   Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
       
   944   Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
       
   945 
       
   946   assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");
       
   947 
       
   948 #ifdef PRINT_NODE_TRAVERSALS
       
   949   tty->print("\n\n\nBefore barrier optimization:\n");
       
   950   traverse(barrier, out_ctrl, out_res, -1);
       
   951 
       
   952   tty->print("\nBefore barrier optimization:  preceding_barrier_node\n");
       
   953   traverse(preceding_barrier_node, out_ctrl, out_res, -1);
       
   954 #endif
       
   955 
       
   956   float unlikely  = PROB_UNLIKELY(0.999);
       
   957 
       
   958   Node* jthread = igvn.transform(new ThreadLocalNode());
       
   959   Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
       
   960   Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
       
   961                                                  TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
       
   962                                                  MemNode::unordered));
       
   963   Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
       
   964   Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
       
   965   Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
       
   966   Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
       
   967   IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
       
   968   Node* then = igvn.transform(new IfTrueNode(iff));
       
   969   Node* elsen = igvn.transform(new IfFalseNode(iff));
       
   970 
       
   971   Node* slow_path_surrogate;
       
   972   if (!barrier->is_weak()) {
       
   973     slow_path_surrogate = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
       
   974                                                                     (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
       
   975   } else {
       
   976     slow_path_surrogate = igvn.transform(new LoadBarrierWeakSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
       
   977                                                                         (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
       
   978   }
       
   979 
       
   980   Node *new_loadp;
       
   981   new_loadp = slow_path_surrogate;
       
   982   // Create the final region/phi pair to converge cntl/data paths to downstream code
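          // Path 1 (then) carries the slow-path result, path 2 (elsen) the
          // original oop that already passed the bad-mask test.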
       
   983   Node* result_region = igvn.transform(new RegionNode(3));
       
   984   result_region->set_req(1, then);
       
   985   result_region->set_req(2, elsen);
       
   986 
       
   987   Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
       
   988   result_phi->set_req(1, new_loadp);
       
   989   result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
       
   990 
       
   991   // Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
       
    992
        
    993   if (out_ctrl != NULL) {
        
    994     igvn.replace_node(out_ctrl, result_region);
        
    995   }
       
   996   igvn.replace_node(out_res, result_phi);
       
   997 
       
    998   assert(barrier->outcnt() == 0, "LoadBarrier macro node still has outputs after expansion!");
       
   999 
       
  1000 #ifdef PRINT_NODE_TRAVERSALS
       
  1001   tty->print("\nAfter barrier optimization:  old out_ctrl\n");
       
  1002   traverse(out_ctrl, out_ctrl, out_res, -1);
       
  1003   tty->print("\nAfter barrier optimization:  old out_res\n");
       
  1004   traverse(out_res, out_ctrl, out_res, -1);
       
  1005   tty->print("\nAfter barrier optimization:  old barrier\n");
       
  1006   traverse(barrier, out_ctrl, out_res, -1);
       
  1007   tty->print("\nAfter barrier optimization:  preceding_barrier_node\n");
       
  1008   traverse(preceding_barrier_node, result_region, result_phi, -1);
       
  1009 #endif
       
  1010 
       
  1011   assert(is_gc_barrier_node(result_phi), "sanity");
       
  1012   assert(step_over_gc_barrier(result_phi) == in_val, "sanity");
       
  1013 }
       
  1014 
       
  1015 bool ZBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
       
  1016   ZBarrierSetC2State* s = state();
       
  1017   if (s->load_barrier_count() > 0) {
       
  1018     PhaseMacroExpand macro(igvn);
       
  1019 #ifdef ASSERT
       
  1020     verify_gc_barriers(false);
       
  1021 #endif
       
  1022     int skipped = 0;
       
  1023     while (s->load_barrier_count() > skipped) {
       
  1024       int load_barrier_count = s->load_barrier_count();
       
  1025       LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
       
  1026       if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
       
  1027         // Node is unreachable, so don't try to expand it
       
  1028         s->remove_load_barrier_node(n);
       
  1029         continue;
   268         continue;
  1030       }
   269       }
  1031       if (!n->can_be_eliminated()) {
   270 
  1032         skipped++;
   271       MachNode* const mach = node->as_Mach();
       
   272       switch (mach->ideal_Opcode()) {
       
   273       case Op_LoadP:
       
   274       case Op_CompareAndExchangeP:
       
   275       case Op_CompareAndSwapP:
       
   276       case Op_GetAndSetP:
       
   277         if (mach->barrier_data() == ZLoadBarrierStrong) {
       
   278           barrier_loads.push(mach);
       
   279         }
       
   280       case Op_StoreP:
       
   281         mem_ops.push(mach);
       
   282         break;
       
   283 
       
   284       default:
       
   285         break;
       
   286       }
       
   287     }
       
   288   }
       
   289 
       
   290   // Step 2 - Find dominating accesses for each load
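          // A load's barrier can be elided when a dominating access to the
          // same base and offset reaches it without an intervening safepoint;
          // that access already left a healed oop behind, and the oop can only
          // become bad again across a safepoint. In essence:
          //
          //   x.f = p;     // StoreP to x.f
          //   ...          // no safepoint in between
          //   q = x.f;     // LoadP: barrier data set to ZLoadBarrierElided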
       
   291   for (uint i = 0; i < barrier_loads.size(); i++) {
       
   292     MachNode* const load = barrier_loads.at(i)->as_Mach();
       
   293     const TypePtr* load_adr_type = NULL;
       
   294     intptr_t load_offset = 0;
       
   295     const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type);
       
   296     Block* const load_block = cfg->get_block_for_node(load);
       
   297     const uint load_index = block_index(load_block, load);
       
   298 
       
   299     for (uint j = 0; j < mem_ops.size(); j++) {
       
   300       MachNode* mem = mem_ops.at(j)->as_Mach();
       
   301       const TypePtr* mem_adr_type = NULL;
       
   302       intptr_t mem_offset = 0;
       
    303       const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type);
       
   304       Block* mem_block = cfg->get_block_for_node(mem);
       
   305       uint mem_index = block_index(mem_block, mem);
       
   306 
       
   307       if (load_obj == NodeSentinel || mem_obj == NodeSentinel ||
       
   308           load_obj == NULL || mem_obj == NULL ||
       
   309           load_offset < 0 || mem_offset < 0) {
  1033         continue;
   310         continue;
  1034       }
   311       }
  1035       expand_loadbarrier_node(&macro, n);
   312 
  1036       assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
   313       if (mem_obj != load_obj || mem_offset != load_offset) {
  1037       if (C->failing()) {
   314         // Not the same addresses, not a candidate
  1038         return true;
   315         continue;
  1039       }
   316       }
  1040     }
   317 
  1041     while (s->load_barrier_count() > 0) {
   318       if (load_block == mem_block) {
  1042       int load_barrier_count = s->load_barrier_count();
   319         // Earlier accesses in the same block
  1043       LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
   320         if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) {
  1044       assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
   321           load->set_barrier_data(ZLoadBarrierElided);
  1045       assert(!n->can_be_eliminated(), "should have been processed already");
   322         }
  1046       expand_loadbarrier_node(&macro, n);
   323       } else if (mem_block->dominates(load_block)) {
  1047       assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
   324         // Dominating block? Look around for safepoints
  1048       if (C->failing()) {
   325         ResourceMark rm;
  1049         return true;
   326         Block_List stack;
  1050       }
   327         VectorSet visited(Thread::current()->resource_area());
  1051     }
   328         stack.push(load_block);
  1052     igvn.set_delay_transform(false);
   329         bool safepoint_found = block_has_safepoint(load_block);
  1053     igvn.optimize();
   330         while (!safepoint_found && stack.size() > 0) {
  1054     if (C->failing()) {
   331           Block* block = stack.pop();
  1055       return true;
   332           if (visited.test_set(block->_pre_order)) {
  1056     }
   333             continue;
  1057   }
       
  1058 
       
  1059   return false;
       
  1060 }
       
  1061 
       
  1062 // == Loop optimization ==
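        // The loop pass below tries, in order: reusing a dominating barrier
        // on the same oop, splitting a barrier through its input phi,
        // hoisting a barrier with loop-invariant inputs out of its loop, and
        // commoning two barriers on the same oop at their closest common
        // dominator. See loop_optimize_gc_barrier() for the dispatch order.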
       
  1063 
       
  1064 static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
       
  1065   PhaseIterGVN &igvn = phase->igvn();
       
  1066   Compile* C = Compile::current();
       
  1067 
       
  1068   LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
       
  1069   if (lb2 == NULL) {
       
  1070     return false;
       
  1071   }
       
  1072 
       
  1073   if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
       
  1074     assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "Invalid address");
       
  1075     igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
       
  1076     C->set_major_progress();
       
  1077     return false;
       
  1078   }
       
  1079 
       
  1080   // That transformation may cause the Similar edge on dominated load barriers to be invalid
       
  1081   lb->fix_similar_in_uses(&igvn);
       
  1082 
       
  1083   Node* val = lb->proj_out(LoadBarrierNode::Oop);
       
  1084   assert(lb2->has_true_uses(), "Invalid uses");
       
  1085   assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "Invalid oop");
       
  1086   phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
       
  1087   phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
       
  1088   igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));
       
  1089 
       
  1090   return true;
       
  1091 }
       
  1092 
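        // Walk the raw memory graph upwards from 'mem' while the current
        // state's control is dominated by 'dom'; a memory phi on 'dom' itself
        // is followed through input 'i'. The result is a memory state that
        // can be used at 'dom' (or on the selected phi input path).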
       
  1093 static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
       
  1094   assert(dom->is_Region() || i == -1, "");
       
  1095 
       
  1096   Node* m = mem;
       
   1097   while (phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
       
  1098     if (m->is_Mem()) {
       
  1099       assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, "");
       
  1100       m = m->in(MemNode::Memory);
       
  1101     } else if (m->is_MergeMem()) {
       
  1102       m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
       
  1103     } else if (m->is_Phi()) {
       
  1104       if (m->in(0) == dom && i != -1) {
       
  1105         m = m->in(i);
       
  1106         break;
       
  1107       } else {
       
  1108         m = m->in(LoopNode::EntryControl);
       
  1109       }
       
  1110     } else if (m->is_Proj()) {
       
  1111       m = m->in(0);
       
  1112     } else if (m->is_SafePoint() || m->is_MemBar()) {
       
  1113       m = m->in(TypeFunc::Memory);
       
  1114     } else {
       
  1115 #ifdef ASSERT
       
  1116       m->dump();
       
  1117 #endif
       
  1118       ShouldNotReachHere();
       
  1119     }
       
  1120   }
       
  1121 
       
  1122   return m;
       
  1123 }
       
  1124 
       
  1125 static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) {
       
  1126   PhaseIterGVN &igvn = phase->igvn();
       
  1127   Compile* C = Compile::current();
       
  1128   Node* the_clone = lb->clone();
       
  1129   the_clone->set_req(LoadBarrierNode::Control, ctl);
       
  1130   the_clone->set_req(LoadBarrierNode::Memory, mem);
       
  1131   if (oop_in != NULL) {
       
  1132     the_clone->set_req(LoadBarrierNode::Oop, oop_in);
       
  1133   }
       
  1134 
       
  1135   LoadBarrierNode* new_lb = the_clone->as_LoadBarrier();
       
  1136   igvn.register_new_node_with_optimizer(new_lb);
       
  1137   IdealLoopTree *loop = phase->get_loop(new_lb->in(0));
       
  1138   phase->set_ctrl(new_lb, new_lb->in(0));
       
  1139   phase->set_loop(new_lb, loop);
       
  1140   phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1);
       
  1141   if (!loop->_child) {
       
  1142     loop->_body.push(new_lb);
       
  1143   }
       
  1144 
       
  1145   Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control);
       
  1146   igvn.register_new_node_with_optimizer(proj_ctl);
       
  1147   phase->set_ctrl(proj_ctl, proj_ctl->in(0));
       
  1148   phase->set_loop(proj_ctl, loop);
       
  1149   phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1);
       
  1150   if (!loop->_child) {
       
  1151     loop->_body.push(proj_ctl);
       
  1152   }
       
  1153 
       
  1154   Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop);
       
  1155   phase->register_new_node(proj_oop, new_lb);
       
  1156 
       
  1157   if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) {
       
  1158     LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier();
       
  1159     if (!phase->is_dominator(similar, ctl)) {
       
  1160       igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top());
       
  1161     }
       
  1162   }
       
  1163 
       
  1164   return new_lb;
       
  1165 }
       
  1166 
       
  1167 static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) {
       
  1168   PhaseIterGVN &igvn = phase->igvn();
       
  1169   Node* val = lb->proj_out(LoadBarrierNode::Oop);
       
  1170   igvn.replace_node(val, new_val);
       
  1171   phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
       
  1172   phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
       
  1173 }
       
  1174 
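        // If the barrier's oop input is a two-input phi, clone the barrier
        // into each predecessor path of the phi's region and replace the
        // original barrier with a phi over the clones' oop projections.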
       
  1175 static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
       
  1176   PhaseIterGVN &igvn = phase->igvn();
       
  1177   Compile* C = Compile::current();
       
  1178 
       
  1179   if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
       
  1180     Node* oop_phi = lb->in(LoadBarrierNode::Oop);
       
  1181 
       
  1182     if ((oop_phi->req() != 3) || (oop_phi->in(2) == oop_phi)) {
       
  1183       // Ignore phis with only one input
       
  1184       return false;
       
  1185     }
       
  1186 
       
  1187     if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)),
       
  1188                             oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) {
       
  1189       // That transformation may cause the Similar edge on dominated load barriers to be invalid
       
  1190       lb->fix_similar_in_uses(&igvn);
       
  1191 
       
  1192       RegionNode* region = oop_phi->in(0)->as_Region();
       
  1193 
       
  1194       int backedge = LoopNode::LoopBackControl;
       
  1195       if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) {
       
  1196         Node* c = region->in(backedge)->in(0)->in(0);
       
  1197         assert(c->unique_ctrl_out() == region->in(backedge)->in(0), "");
       
  1198         Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge);
       
  1199         Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop;
       
  1200         if (!phase->is_dominator(oop_c, c)) {
       
  1201           return false;
       
  1202         }
       
  1203       }
       
  1204 
       
   1205       // If the node on the backedge above the phi is the node itself, we have a self loop.
       
  1206       // Don't clone - this will be folded later.
       
  1207       if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) {
       
  1208         return false;
       
  1209       }
       
  1210 
       
  1211       bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined();
       
  1212       Node *phi = oop_phi->clone();
       
  1213 
       
  1214       for (uint i = 1; i < region->req(); i++) {
       
  1215         Node* ctrl = region->in(i);
       
  1216         if (ctrl != C->top()) {
       
  1217           assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), "");
       
  1218 
       
  1219           Node* mem = lb->in(LoadBarrierNode::Memory);
       
  1220           Node* m = find_dominating_memory(phase, mem, region, i);
       
  1221 
       
  1222           if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
       
  1223             ctrl = ctrl->in(0)->in(0);
       
  1224           } else if (region->is_Loop() && is_strip_mined) {
       
  1225             // If this is a strip mined loop, control must move above OuterStripMinedLoop
       
  1226             assert(i == LoopNode::EntryControl, "check");
       
  1227             assert(ctrl->is_OuterStripMinedLoop(), "sanity");
       
  1228             ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl);
       
  1229           }
   334           }
  1230 
   335           if (block_has_safepoint(block)) {
  1231           LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i));
   336             safepoint_found = true;
  1232           Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control);
   337             break;
  1233 
       
  1234           if (is_strip_mined && (i == LoopNode::EntryControl)) {
       
  1235             assert(region->in(i)->is_OuterStripMinedLoop(), "");
       
  1236             igvn.replace_input_of(region->in(i), i, out_ctrl);
       
  1237             phase->set_idom(region->in(i), out_ctrl, phase->dom_depth(out_ctrl));
       
  1238           } else if (ctrl == region->in(i)) {
       
  1239             igvn.replace_input_of(region, i, out_ctrl);
       
   1240             // Only update the idom if it is the loop entry we are updating
       
  1241             // - A loop backedge doesn't change the idom
       
  1242             if (region->is_Loop() && i == LoopNode::EntryControl) {
       
  1243               phase->set_idom(region, out_ctrl, phase->dom_depth(out_ctrl));
       
  1244             }
       
  1245           } else {
       
  1246             Node* iff = region->in(i)->in(0);
       
  1247             igvn.replace_input_of(iff, 0, out_ctrl);
       
  1248             phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1);
       
  1249           }
   338           }
  1250           phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop));
   339           if (block == mem_block) {
  1251         }
   340             continue;
  1252       }
       
  1253       phase->register_new_node(phi, region);
       
  1254       replace_barrier(phase, lb, phi);
       
  1255 
       
  1256       if (region->is_Loop()) {
       
   1257         // A load barrier moved to the back edge of the Loop may now
       
  1258         // have a safepoint on the path to the barrier on the Similar
       
  1259         // edge
       
  1260         igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top());
       
  1261         Node* head = region->in(LoopNode::EntryControl);
       
  1262         phase->set_idom(region, head, phase->dom_depth(head)+1);
       
  1263         phase->recompute_dom_depth();
       
  1264         if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) {
       
  1265           head->as_CountedLoop()->set_normal_loop();
       
  1266         }
       
  1267       }
       
  1268 
       
  1269       return true;
       
  1270     }
       
  1271   }
       
  1272 
       
  1273   return false;
       
  1274 }
       
  1275 
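        // Hoist a barrier whose oop and address inputs are defined outside
        // its loop to just before the loop entry (above the outer strip-mined
        // loop when there is one).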
       
  1276 static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
       
  1277   PhaseIterGVN &igvn = phase->igvn();
       
  1278   IdealLoopTree *lb_loop = phase->get_loop(lb->in(0));
       
  1279   if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) {
       
  1280     Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop));
       
  1281     IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl);
       
  1282     IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address)));
       
  1283     if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) {
       
  1284       // That transformation may cause the Similar edge on dominated load barriers to be invalid
       
  1285       lb->fix_similar_in_uses(&igvn);
       
  1286 
       
  1287       Node* head = lb_loop->_head;
       
  1288       assert(head->is_Loop(), "");
       
  1289 
       
  1290       if (phase->is_dominator(head, oop_ctrl)) {
       
  1291         assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, "");
       
  1292         assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), "");
       
  1293         return false;
       
  1294       }
       
  1295 
       
  1296       if (head->is_CountedLoop()) {
       
  1297         CountedLoopNode* cloop = head->as_CountedLoop();
       
  1298         if (cloop->is_main_loop()) {
       
  1299           cloop->set_normal_loop();
       
  1300         }
       
   1301         // When we are moving a barrier out of a counted loop,
       
  1302         // make sure we move it all the way out of the strip mined outer loop.
       
  1303         if (cloop->is_strip_mined()) {
       
  1304           head = cloop->outer_loop();
       
  1305         }
       
  1306       }
       
  1307 
       
  1308       Node* mem = lb->in(LoadBarrierNode::Memory);
       
  1309       Node* m = find_dominating_memory(phase, mem, head, -1);
       
  1310 
       
  1311       LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL);
       
  1312 
       
  1313       assert(phase->idom(head) == head->in(LoopNode::EntryControl), "");
       
  1314       Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
       
  1315       igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl);
       
  1316       phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1);
       
  1317 
       
  1318       replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop));
       
  1319 
       
  1320       phase->recompute_dom_depth();
       
  1321 
       
  1322       return true;
       
  1323     }
       
  1324   }
       
  1325 
       
  1326   return false;
       
  1327 }
       
  1328 
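        // If another barrier uses the same oop, try to replace both barriers
        // with a single clone placed at the common dominator of their
        // controls, when the two control paths split only at the If directly
        // below that dominator (uncommon trap ifs along the way are tolerated).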
       
  1329 static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
       
  1330   PhaseIterGVN &igvn = phase->igvn();
       
  1331   Node* in_val = lb->in(LoadBarrierNode::Oop);
       
  1332   for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) {
       
  1333     Node* u = in_val->fast_out(i);
       
  1334     if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
       
  1335       Node* this_ctrl = lb->in(LoadBarrierNode::Control);
       
  1336       Node* other_ctrl = u->in(LoadBarrierNode::Control);
       
  1337 
       
  1338       Node* lca = phase->dom_lca(this_ctrl, other_ctrl);
       
  1339       bool ok = true;
       
  1340 
       
  1341       Node* proj1 = NULL;
       
  1342       Node* proj2 = NULL;
       
  1343 
       
  1344       while (this_ctrl != lca && ok) {
       
  1345         if (this_ctrl->in(0) != NULL &&
       
  1346             this_ctrl->in(0)->is_MultiBranch()) {
       
  1347           if (this_ctrl->in(0)->in(0) == lca) {
       
  1348             assert(proj1 == NULL, "");
       
  1349             assert(this_ctrl->is_Proj(), "");
       
  1350             proj1 = this_ctrl;
       
  1351           } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
       
  1352             ok = false;
       
  1353           }
   341           }
  1354         }
   342 
  1355         this_ctrl = phase->idom(this_ctrl);
   343           // Push predecessor blocks
  1356       }
   344           for (uint p = 1; p < block->num_preds(); ++p) {
  1357       while (other_ctrl != lca && ok) {
   345             Block* pred = cfg->get_block_for_node(block->pred(p));
  1358         if (other_ctrl->in(0) != NULL &&
   346             stack.push(pred);
  1359             other_ctrl->in(0)->is_MultiBranch()) {
       
  1360           if (other_ctrl->in(0)->in(0) == lca) {
       
  1361             assert(other_ctrl->is_Proj(), "");
       
  1362             assert(proj2 == NULL, "");
       
  1363             proj2 = other_ctrl;
       
  1364           } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
       
  1365             ok = false;
       
  1366           }
   347           }
  1367         }
   348         }
  1368         other_ctrl = phase->idom(other_ctrl);
   349 
  1369       }
   350         if (!safepoint_found) {
  1370       assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), "");
   351           load->set_barrier_data(ZLoadBarrierElided);
  1371       if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) {
   352         }
  1372         // That transformation may cause the Similar edge on dominated load barriers to be invalid
   353       }
  1373         lb->fix_similar_in_uses(&igvn);
   354     }
  1374         u->as_LoadBarrier()->fix_similar_in_uses(&igvn);
   355   }
  1375 
   356 }
  1376         Node* split = lca->unique_ctrl_out();
   357 
  1377         assert(split->in(0) == lca, "");
   358 // == Reduced spilling optimization ==
  1378 
   359 
  1379         Node* mem = lb->in(LoadBarrierNode::Memory);
   360 void ZBarrierSetC2::compute_liveness_at_stubs() const {
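          // Backward liveness over the CFG: seed each block with the union of
          // its successors' live sets, walk the block bottom-up killing
          // definitions and adding uses, record the result on nodes that
          // track liveness (the load barrier stubs), and re-queue
          // predecessors until a fixed point is reached.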
  1380         Node* m = find_dominating_memory(phase, mem, split, -1);
       
  1381         LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL);
       
  1382 
       
  1383         Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
       
  1384         igvn.replace_input_of(split, 0, new_lb->proj_out(LoadBarrierNode::Control));
       
  1385         phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1);
       
  1386 
       
  1387         Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop);
       
  1388         replace_barrier(phase, lb, proj_oop);
       
  1389         replace_barrier(phase, u->as_LoadBarrier(), proj_oop);
       
  1390 
       
  1391         phase->recompute_dom_depth();
       
  1392 
       
  1393         return true;
       
  1394       }
       
  1395     }
       
  1396   }
       
  1397 
       
  1398   return false;
       
  1399 }
       
  1400 
       
  1401 void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
       
  1402   if (!Compile::current()->directive()->ZOptimizeLoadBarriersOption) {
       
  1403     return;
       
  1404   }
       
  1405 
       
  1406   if (!node->is_LoadBarrier()) {
       
  1407     return;
       
  1408   }
       
  1409 
       
  1410   if (!node->as_LoadBarrier()->has_true_uses()) {
       
  1411     return;
       
  1412   }
       
  1413 
       
  1414   if (replace_with_dominating_barrier(phase, node->as_LoadBarrier(), last_round)) {
       
  1415     return;
       
  1416   }
       
  1417 
       
  1418   if (split_barrier_thru_phi(phase, node->as_LoadBarrier())) {
       
  1419     return;
       
  1420   }
       
  1421 
       
  1422   if (move_out_of_loop(phase, node->as_LoadBarrier())) {
       
  1423     return;
       
  1424   }
       
  1425 
       
  1426   if (common_barriers(phase, node->as_LoadBarrier())) {
       
  1427     return;
       
  1428   }
       
  1429 }
       
  1430 
       
  1431 Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
       
  1432   Node* node = c;
       
  1433 
       
  1434   // 1. This step follows potential oop projections of a load barrier before expansion
       
  1435   if (node->is_Proj()) {
       
  1436     node = node->in(0);
       
  1437   }
       
  1438 
       
  1439   // 2. This step checks for unexpanded load barriers
       
  1440   if (node->is_LoadBarrier()) {
       
  1441     return node->in(LoadBarrierNode::Oop);
       
  1442   }
       
  1443 
       
  1444   // 3. This step checks for the phi corresponding to an optimized load barrier expansion
       
  1445   if (node->is_Phi()) {
       
  1446     PhiNode* phi = node->as_Phi();
       
  1447     Node* n = phi->in(1);
       
   1448     if (n != NULL && (n->is_LoadBarrierSlowReg() || n->is_LoadBarrierWeakSlowReg())) {
       
  1449       assert(c == node, "projections from step 1 should only be seen before macro expansion");
       
  1450       return phi->in(2);
       
  1451     }
       
  1452   }
       
  1453 
       
  1454   return c;
       
  1455 }
       
  1456 
       
  1457 bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
       
  1458   return type == T_OBJECT || type == T_ARRAY;
       
  1459 }
       
  1460 
       
  1461 bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
       
  1462   if (opcode != Op_LoadBarrierSlowReg &&
       
  1463       opcode != Op_LoadBarrierWeakSlowReg) {
       
  1464     return false;
       
  1465   }
       
  1466 
       
  1467 #ifdef ASSERT
       
  1468   if (VerifyOptoOopOffsets) {
       
  1469     MemNode* mem  = n->as_Mem();
       
  1470     // Check to see if address types have grounded out somehow.
       
  1471     const TypeInstPtr* tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
       
  1472     ciInstanceKlass* k = tp->klass()->as_instance_klass();
       
  1473     bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
       
  1474     assert(!tp || oop_offset_is_sane, "");
       
  1475   }
       
  1476 #endif
       
  1477 
       
  1478   return true;
       
  1479 }
       
  1480 
       
  1481 bool ZBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
       
  1482   if (opcode == Op_CallLeaf &&
       
  1483       (n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() ||
       
  1484        n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr())) {
       
  1485     mem_op = true;
       
  1486     mem_addr_idx = TypeFunc::Parms + 1;
       
  1487     return true;
       
  1488   }
       
  1489 
       
  1490   return false;
       
  1491 }
       
  1492 
       
  1493 // == Verification ==
       
  1494 
       
  1495 #ifdef ASSERT
       
  1496 
       
  1497 static bool look_for_barrier(Node* n, bool post_parse, VectorSet& visited) {
       
  1498   if (visited.test_set(n->_idx)) {
       
  1499     return true;
       
  1500   }
       
  1501 
       
  1502   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       
  1503     Node* u = n->fast_out(i);
       
  1504     if (u->is_LoadBarrier()) {
       
  1505     } else if ((u->is_Phi() || u->is_CMove()) && !post_parse) {
       
  1506       if (!look_for_barrier(u, post_parse, visited)) {
       
  1507         return false;
       
  1508       }
       
  1509     } else if (u->Opcode() == Op_EncodeP || u->Opcode() == Op_DecodeN) {
       
  1510       if (!look_for_barrier(u, post_parse, visited)) {
       
  1511         return false;
       
  1512       }
       
  1513     } else if (u->Opcode() != Op_SCMemProj) {
       
  1514       tty->print("bad use"); u->dump();
       
  1515       return false;
       
  1516     }
       
  1517   }
       
  1518 
       
  1519   return true;
       
  1520 }
       
  1521 
       
  1522 void ZBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
       
  1523   if (phase == BarrierSetC2::BeforeCodeGen) return;
       
  1524   bool post_parse = phase == BarrierSetC2::BeforeOptimize;
       
  1525   verify_gc_barriers(post_parse);
       
  1526 }
       
  1527 
       
  1528 void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
       
  1529   ZBarrierSetC2State* s = state();
       
  1530   Compile* C = Compile::current();
       
  1531   ResourceMark rm;
   361   ResourceMark rm;
  1532   VectorSet visited(Thread::current()->resource_area());
   362   Compile* const C = Compile::current();
  1533   for (int i = 0; i < s->load_barrier_count(); i++) {
   363   Arena* const A = Thread::current()->resource_area();
  1534     LoadBarrierNode* n = s->load_barrier_node(i);
   364   PhaseCFG* const cfg = C->cfg();
  1535 
   365   PhaseRegAlloc* const regalloc = C->regalloc();
   1536     // If a dominating barrier on the same address exists, then this
    366   RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks());
   1537     // barrier must not be applied to the value from the same load;
    367   ZBarrierSetAssembler* const bs = ZBarrierSet::assembler();
   1538     // otherwise the value is not reloaded before it is used the
    368   Block_List worklist;
   1539     // second time.
   369 
  1540     assert(n->in(LoadBarrierNode::Similar)->is_top() ||
   370   for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
  1541            (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
   371     new ((void*)(live + i)) RegMask();
  1542             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
   372     worklist.push(cfg->get_block(i));
  1543             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
   373   }
  1544            "broken similar edge");
   374 
  1545 
   375   while (worklist.size() > 0) {
  1546     assert(post_parse || n->as_LoadBarrier()->has_true_uses(),
   376     const Block* const block = worklist.pop();
  1547            "found unneeded load barrier");
   377     RegMask& old_live = live[block->_pre_order];
  1548 
   378     RegMask new_live;
  1549     // Several load barrier nodes chained through their Similar edge
   379 
   1550     // break the code that removes the barriers in final graph reshape.
   380     // Initialize to union of successors
  1551     assert(n->in(LoadBarrierNode::Similar)->is_top() ||
   381     for (uint i = 0; i < block->_num_succs; i++) {
  1552            (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
   382       const uint succ_id = block->_succs[i]->_pre_order;
  1553             n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
   383       new_live.OR(live[succ_id]);
  1554            "chain of Similar load barriers");
   384     }
  1555 
   385 
  1556     if (!n->in(LoadBarrierNode::Similar)->is_top()) {
   386     // Walk block backwards, computing liveness
  1557       ResourceMark rm;
   387     for (int i = block->number_of_nodes() - 1; i >= 0; --i) {
  1558       Unique_Node_List wq;
   388       const Node* const node = block->get_node(i);
  1559       Node* other = n->in(LoadBarrierNode::Similar)->in(0);
   389 
  1560       wq.push(n);
   390       // Remove def bits
  1561       bool ok = true;
   391       const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
  1562       bool dom_found = false;
   392       const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
  1563       for (uint next = 0; next < wq.size(); ++next) {
   393       if (first != OptoReg::Bad) {
  1564         Node *n = wq.at(next);
   394         new_live.Remove(first);
  1565         assert(n->is_CFG(), "");
   395       }
  1566         assert(!n->is_SafePoint(), "");
   396       if (second != OptoReg::Bad) {
  1567 
   397         new_live.Remove(second);
  1568         if (n == other) {
   398       }
  1569           continue;
   399 
  1570         }
   400       // Add use bits
  1571 
   401       for (uint j = 1; j < node->req(); ++j) {
  1572         if (n->is_Region()) {
   402         const Node* const use = node->in(j);
  1573           for (uint i = 1; i < n->req(); i++) {
   403         const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
  1574             Node* m = n->in(i);
   404         const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
  1575             if (m != NULL) {
   405         if (first != OptoReg::Bad) {
  1576               wq.push(m);
   406           new_live.Insert(first);
  1577             }
   407         }
  1578           }
   408         if (second != OptoReg::Bad) {
  1579         } else {
   409           new_live.Insert(second);
  1580           Node* m = n->in(0);
   410         }
  1581           if (m != NULL) {
   411       }
  1582             wq.push(m);
   412 
  1583           }
   413       // If this node tracks liveness, update it
  1584         }
   414       RegMask* const regs = barrier_set_state()->live(node);
  1585       }
   415       if (regs != NULL) {
  1586     }
   416         regs->OR(new_live);
  1587 
   417       }
  1588     if (ZVerifyLoadBarriers) {
   418     }
  1589       if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
   419 
  1590         visited.Clear();
   420     // Now at block top, see if we have any changes
  1591         bool found = look_for_barrier(n, post_parse, visited);
   421     new_live.SUBTRACT(old_live);
  1592         if (!found) {
   422     if (new_live.is_NotEmpty()) {
  1593           n->dump(1);
   423       // Liveness has refined, update and propagate to prior blocks
  1594           n->dump(-3);
   424       old_live.OR(new_live);
  1595           stringStream ss;
   425       for (uint i = 1; i < block->num_preds(); ++i) {
  1596           C->method()->print_short_name(&ss);
   426         Block* const pred = cfg->get_block_for_node(block->pred(i));
  1597           tty->print_cr("-%s-", ss.as_string());
   427         worklist.push(pred);
  1598           assert(found, "");
   428       }
  1599         }
   429     }
  1600       }
   430   }
  1601     }
   431 }
  1602   }
       
  1603 }
       
  1604 
       
  1605 #endif
       
  1606 
       
  1607 bool ZBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
       
  1608   switch (opcode) {
       
  1609     case Op_LoadBarrierSlowReg:
       
  1610     case Op_LoadBarrierWeakSlowReg:
       
  1611       conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
       
  1612       return true;
       
  1613 
       
  1614     case Op_Proj:
       
  1615       if (n->as_Proj()->_con != LoadBarrierNode::Oop || !n->in(0)->is_LoadBarrier()) {
       
  1616         return false;
       
  1617       }
       
  1618       conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
       
  1619       return true;
       
  1620   }
       
  1621 
       
  1622   return false;
       
  1623 }
       
  1624 
       
  1625 bool ZBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
       
  1626   switch (opcode) {
       
  1627     case Op_LoadBarrierSlowReg:
       
  1628     case Op_LoadBarrierWeakSlowReg:
       
  1629       if (gvn->type(n)->make_ptr() == NULL) {
       
  1630         return false;
       
  1631       }
       
  1632       conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
       
  1633       return true;
       
  1634 
       
  1635     case Op_Proj:
       
  1636       if (n->as_Proj()->_con != LoadBarrierNode::Oop || !n->in(0)->is_LoadBarrier()) {
       
  1637         return false;
       
  1638       }
       
  1639       conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
       
  1640       return true;
       
  1641   }
       
  1642 
       
  1643   return false;
       
  1644 }