src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
branch datagramsocketimpl-branch
changeset 58678 9cf78a70fa4f
parent 55079 de371e2d1acc
child 58679 9c3209ff7550
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Thu Oct 17 20:27:44 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Thu Oct 17 20:53:35 2019 +0100
@@ -179,10 +179,6 @@
       if (trace) {tty->print_cr("NULL");}
     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
       if (trace) {tty->print_cr("Non oop");}
-    } else if (t == ShenandoahLoad && ShenandoahOptimizeStableFinals &&
-               in->bottom_type()->make_ptr()->isa_aryptr() &&
-               in->bottom_type()->make_ptr()->is_aryptr()->is_stable()) {
-      if (trace) {tty->print_cr("Stable array load");}
     } else {
       if (in->is_ConstraintCast()) {
         in = in->in(1);
@@ -323,34 +319,8 @@
                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
           if (trace) {tty->print_cr("Reference.get()");}
-        } else {
-          bool verify = true;
-          if (adr_type->isa_instptr()) {
-            const TypeInstPtr* tinst = adr_type->is_instptr();
-            ciKlass* k = tinst->klass();
-            assert(k->is_instance_klass(), "");
-            ciInstanceKlass* ik = (ciInstanceKlass*)k;
-            int offset = adr_type->offset();
-
-            if ((ik->debug_final_field_at(offset) && ShenandoahOptimizeInstanceFinals) ||
-                (ik->debug_stable_field_at(offset) && ShenandoahOptimizeStableFinals)) {
-              if (trace) {tty->print_cr("Final/stable");}
-              verify = false;
-            } else if (k == ciEnv::current()->Class_klass() &&
-                       tinst->const_oop() != NULL &&
-                       tinst->offset() >= (ik->size_helper() * wordSize)) {
-              ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
-              ciField* field = k->get_field_by_offset(tinst->offset(), true);
-              if ((ShenandoahOptimizeStaticFinals && field->is_final()) ||
-                  (ShenandoahOptimizeStableFinals && field->is_stable())) {
-                verify = false;
-              }
-            }
-          }
-
-          if (verify && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
-            report_verify_failure("Shenandoah verification: Load should have barriers", n);
-          }
+        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
+          report_verify_failure("Shenandoah verification: Load should have barriers", n);
         }
       }
     } else if (n->is_Store()) {
@@ -670,42 +640,6 @@
         }
       }
     }
-    for( uint i = 0; i < n->len(); ++i ) {
-      Node *m = n->in(i);
-      if (m == NULL) continue;
-
-      // In most cases, inputs should be known to be non null. If it's
-      // not the case, it could be a missing cast_not_null() in an
-      // intrinsic or support might be needed in AddPNode::Ideal() to
-      // avoid a NULL+offset input.
-      if (!(n->is_Phi() ||
-            (n->is_SafePoint() && (!n->is_CallRuntime() || !strcmp(n->as_Call()->_name, "shenandoah_wb_pre") || !strcmp(n->as_Call()->_name, "unsafe_arraycopy"))) ||
-            n->Opcode() == Op_CmpP ||
-            n->Opcode() == Op_CmpN ||
-            (n->Opcode() == Op_StoreP && i == StoreNode::ValueIn) ||
-            (n->Opcode() == Op_StoreN && i == StoreNode::ValueIn) ||
-            n->is_ConstraintCast() ||
-            n->Opcode() == Op_Return ||
-            n->Opcode() == Op_Conv2B ||
-            n->is_AddP() ||
-            n->Opcode() == Op_CMoveP ||
-            n->Opcode() == Op_CMoveN ||
-            n->Opcode() == Op_Rethrow ||
-            n->is_MemBar() ||
-            n->is_Mem() ||
-            n->Opcode() == Op_AryEq ||
-            n->Opcode() == Op_SCMemProj ||
-            n->Opcode() == Op_EncodeP ||
-            n->Opcode() == Op_DecodeN ||
-            n->Opcode() == Op_ShenandoahEnqueueBarrier ||
-            n->Opcode() == Op_ShenandoahLoadReferenceBarrier)) {
-        if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) {
-          report_verify_failure("Shenandoah verification: null input", n, m);
-        }
-      }
-
-      wq.push(m);
-    }
   }
 
   if (verify_no_useless_barrier) {
@@ -1082,7 +1016,7 @@
   phase->register_control(ctrl, loop, in_cset_fast_test_iff);
 }
 
-void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
   IdealLoopTree*loop = phase->get_loop(ctrl);
   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();
 
@@ -1093,13 +1027,22 @@
   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
   phase->register_new_node(mm, ctrl);
 
-  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
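+  // Pick the runtime entry that matches the in-memory format of the loaded slot:
+  // the fixup stubs take the load address so they can write the healed oop back,
+  // so use the narrow variant when the slot holds a compressed oop.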
+  address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
+          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_narrow) :
+          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup);
+
+  address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)
+                               : target;
+  const char* name = is_native ? "oop_load_from_native_barrier" : "load_reference_barrier";
+  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
+
   call->init_req(TypeFunc::Control, ctrl);
   call->init_req(TypeFunc::I_O, phase->C->top());
   call->init_req(TypeFunc::Memory, mm);
   call->init_req(TypeFunc::FramePtr, phase->C->top());
   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
   call->init_req(TypeFunc::Parms, val);
+  call->init_req(TypeFunc::Parms+1, load_addr);
   phase->register_control(call, loop, ctrl);
   ctrl = new ProjNode(call, TypeFunc::Control);
   phase->register_control(ctrl, loop, call);
@@ -1329,6 +1272,38 @@
     }
     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
+      if (call->entry_point() == OptoRuntime::rethrow_stub()) {
+        // The rethrow call may have too many projections to be
+        // properly handled here. Given there's no reason for a
+        // barrier to depend on the call, move it above the call.
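+        // Depth-first walk of the barrier's inputs: every input whose control is
+        // dominated by the rethrow call gets re-pinned (set_ctrl) to the call's
+        // own control input, i.e. above the call.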
+        stack.push(lrb, 0);
+        do {
+          Node* n = stack.node();
+          uint idx = stack.index();
+          if (idx < n->req()) {
+            Node* in = n->in(idx);
+            stack.set_index(idx+1);
+            if (in != NULL) {
+              if (phase->has_ctrl(in)) {
+                if (phase->is_dominator(call, phase->get_ctrl(in))) {
+#ifdef ASSERT
+                  for (uint i = 0; i < stack.size(); i++) {
+                    assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
+                  }
+#endif
+                  stack.push(in, 0);
+                }
+              } else {
+                assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
+              }
+            }
+          } else {
+            phase->set_ctrl(n, call->in(0));
+            stack.pop();
+          }
+        } while (stack.size() > 0);
+        continue;
+      }
       CallProjections projs;
       call->extract_projections(&projs, false, false);
 
@@ -1464,7 +1439,7 @@
     assert(val->bottom_type()->make_oopptr(), "need oop");
     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
 
-    enum { _heap_stable = 1, _not_cset, _fwded, _evac_path, _null_path, PATH_LIMIT };
+    enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
     Node* region = new RegionNode(PATH_LIMIT);
     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
@@ -1514,49 +1489,44 @@
       IfNode* iff = unc_ctrl->in(0)->as_If();
       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
     }
-    Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(oopDesc::mark_offset_in_bytes()));
-    phase->register_new_node(addr, ctrl);
-    assert(new_val->bottom_type()->isa_oopptr(), "what else?");
-    Node* markword = new LoadXNode(ctrl, raw_mem, addr, TypeRawPtr::BOTTOM, TypeX_X, MemNode::unordered);
-    phase->register_new_node(markword, ctrl);
-
-    // Test if object is forwarded. This is the case if lowest two bits are set.
-    Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markOopDesc::lock_mask_in_place));
-    phase->register_new_node(masked, ctrl);
-    Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markOopDesc::marked_value));
-    phase->register_new_node(cmp, ctrl);
-
-    // Only branch to LRB stub if object is not forwarded; otherwise reply with fwd ptr
-    Node* bol = new BoolNode(cmp, BoolTest::eq); // Equals 3 means it's forwarded
-    phase->register_new_node(bol, ctrl);
-
-    IfNode* iff = new IfNode(ctrl, bol, PROB_LIKELY(0.999), COUNT_UNKNOWN);
-    phase->register_control(iff, loop, ctrl);
-    Node* if_fwd = new IfTrueNode(iff);
-    phase->register_control(if_fwd, loop, iff);
-    Node* if_not_fwd = new IfFalseNode(iff);
-    phase->register_control(if_not_fwd, loop, iff);
-
-    // Decode forward pointer: since we already have the lowest bits, we can just subtract them
-    // from the mark word without the need for large immediate mask.
-    Node* masked2 = new SubXNode(markword, masked);
-    phase->register_new_node(masked2, if_fwd);
-    Node* fwdraw = new CastX2PNode(masked2);
-    fwdraw->init_req(0, if_fwd);
-    phase->register_new_node(fwdraw, if_fwd);
-    Node* fwd = new CheckCastPPNode(NULL, fwdraw, val->bottom_type());
-    phase->register_new_node(fwd, if_fwd);
-
-    // Wire up not-equal-path in slots 3.
-    region->init_req(_fwded, if_fwd);
-    val_phi->init_req(_fwded, fwd);
-    raw_mem_phi->init_req(_fwded, raw_mem);
 
     // Call lrb-stub and wire up that path in slots 4
     Node* result_mem = NULL;
-    ctrl = if_not_fwd;
-    fwd = new_val;
-    call_lrb_stub(ctrl, fwd, result_mem, raw_mem, phase);
+
+    Node* fwd = new_val;
+    Node* addr;
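+    // With self-fixing enabled, try to recover the address the value was loaded
+    // from so the runtime can write the healed oop back into the slot; otherwise
+    // pass a null (zerocon) address.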
+    if (ShenandoahSelfFixing) {
+      VectorSet visited(Thread::current()->resource_area());
+      addr = get_load_addr(phase, visited, lrb);
+    } else {
+      addr = phase->igvn().zerocon(T_OBJECT);
+    }
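+    // If the address is an AddP chain, rebuild it on top of a CheckCastPP of the
+    // base that is pinned at ctrl, so the address passed to the stub is computed
+    // on this slow path only.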
+    if (addr->Opcode() == Op_AddP) {
+      Node* orig_base = addr->in(AddPNode::Base);
+      Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
+      phase->register_new_node(base, ctrl);
+      if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
+        // Field access
+        addr = addr->clone();
+        addr->set_req(AddPNode::Base, base);
+        addr->set_req(AddPNode::Address, base);
+        phase->register_new_node(addr, ctrl);
+      } else {
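+        // Two-level AddP (base + header offset + scaled index, as for array
+        // element accesses): rebuild both AddP nodes on the new base.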
+        Node* addr2 = addr->in(AddPNode::Address);
+        if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
+              addr2->in(AddPNode::Base) == orig_base) {
+          addr2 = addr2->clone();
+          addr2->set_req(AddPNode::Base, base);
+          addr2->set_req(AddPNode::Address, base);
+          phase->register_new_node(addr2, ctrl);
+          addr = addr->clone();
+          addr->set_req(AddPNode::Base, base);
+          addr->set_req(AddPNode::Address, addr2);
+          phase->register_new_node(addr, ctrl);
+        }
+      }
+    }
+    call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, lrb->is_native(), phase);
     region->init_req(_evac_path, ctrl);
     val_phi->init_req(_evac_path, fwd);
     raw_mem_phi->init_req(_evac_path, result_mem);
@@ -1759,6 +1729,74 @@
 
 }
 
+Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
+  if (visited.test_set(in->_idx)) {
+    return NULL;
+  }
+  switch (in->Opcode()) {
+    case Op_Proj:
+      return get_load_addr(phase, visited, in->in(0));
+    case Op_CastPP:
+    case Op_CheckCastPP:
+    case Op_DecodeN:
+    case Op_EncodeP:
+      return get_load_addr(phase, visited, in->in(1));
+    case Op_LoadN:
+    case Op_LoadP:
+      return in->in(MemNode::Address);
+    case Op_CompareAndExchangeN:
+    case Op_CompareAndExchangeP:
+    case Op_GetAndSetN:
+    case Op_GetAndSetP:
+    case Op_ShenandoahCompareAndExchangeP:
+    case Op_ShenandoahCompareAndExchangeN:
+      // Those instructions would just have stored a different
+      // value into the field. No use to attempt to fix it at this point.
+      return phase->igvn().zerocon(T_OBJECT);
+    case Op_CMoveP:
+    case Op_CMoveN: {
+      Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
+      Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
+      // Handle unambiguous cases: single address reported on both branches.
+      if (t != NULL && f == NULL) return t;
+      if (t == NULL && f != NULL) return f;
+      if (t != NULL && t == f)    return t;
+      // Ambiguity.
+      return phase->igvn().zerocon(T_OBJECT);
+    }
+    case Op_Phi: {
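+      // All phi inputs must agree on a single load address; otherwise give up.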
+      Node* addr = NULL;
+      for (uint i = 1; i < in->req(); i++) {
+        Node* addr1 = get_load_addr(phase, visited, in->in(i));
+        if (addr == NULL) {
+          addr = addr1;
+        }
+        if (addr != addr1) {
+          return phase->igvn().zerocon(T_OBJECT);
+        }
+      }
+      return addr;
+    }
+    case Op_ShenandoahLoadReferenceBarrier:
+      return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
+    case Op_ShenandoahEnqueueBarrier:
+      return get_load_addr(phase, visited, in->in(1));
+    case Op_CallDynamicJava:
+    case Op_CallLeaf:
+    case Op_CallStaticJava:
+    case Op_ConN:
+    case Op_ConP:
+    case Op_Parm:
+      return phase->igvn().zerocon(T_OBJECT);
+    default:
+#ifdef ASSERT
+      fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
+#endif
+      return phase->igvn().zerocon(T_OBJECT);
+  }
+
+}
+
 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
   IdealLoopTree *loop = phase->get_loop(iff);
   Node* loop_head = loop->_head;
@@ -2977,10 +3015,11 @@
                u->Opcode() == Op_Rethrow ||
                u->Opcode() == Op_Return ||
                u->Opcode() == Op_SafePoint ||
+               u->Opcode() == Op_StoreIConditional ||
                u->Opcode() == Op_StoreLConditional ||
                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
-               u->Opcode() == Op_CallLeaf, "");
+               u->Opcode() == Op_CallLeaf, "%s", u->Name());
         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
           if (mm == NULL) {
             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
@@ -2998,11 +3037,28 @@
   }
 }
 
-ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
-: Node(ctrl, obj) {
+ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, bool native)
+: Node(ctrl, obj), _native(native) {
   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
 }
 
+bool ShenandoahLoadReferenceBarrierNode::is_native() const {
+  return _native;
+}
+
+uint ShenandoahLoadReferenceBarrierNode::size_of() const {
+  return sizeof(*this);
+}
+
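+// _native is part of the node's identity: hash() and cmp() include it so GVN
+// never commons a native barrier with a non-native one for the same value.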
+uint ShenandoahLoadReferenceBarrierNode::hash() const {
+  return Node::hash() + (_native ? 1 : 0);
+}
+
+bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
+  return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
+         _native == ((const ShenandoahLoadReferenceBarrierNode&)n)._native;
+}
+
 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
     return Type::TOP;
@@ -3102,11 +3158,14 @@
       return needs_barrier_impl(phase, n->in(1), visited);
     case Op_LoadN:
       return true;
+    case Op_CMoveN:
     case Op_CMoveP:
       return needs_barrier_impl(phase, n->in(2), visited) ||
              needs_barrier_impl(phase, n->in(3), visited);
     case Op_ShenandoahEnqueueBarrier:
       return needs_barrier_impl(phase, n->in(1), visited);
+    case Op_CreateEx:
+      return false;
     default:
       break;
   }
@@ -3125,6 +3184,10 @@
   Unique_Node_List visited;
   Node_Stack stack(0);
   stack.push(this, 0);
+
+  // Look for the strongest strength: go over the nodes looking for STRONG ones.
+  // Stop once we encounter STRONG. Otherwise, walk until we run out of nodes,
+  // and then the overall strength is NONE.
   Strength strength = NONE;
   while (strength != STRONG && stack.size() > 0) {
     Node* n = stack.node();
@@ -3135,22 +3198,7 @@
     visited.push(n);
     bool visit_users = false;
     switch (n->Opcode()) {
-      case Op_StoreN:
-      case Op_StoreP: {
-        strength = STRONG;
-        break;
-      }
-      case Op_CmpP: {
-        if (!n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) &&
-            !n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
-          strength = STRONG;
-        }
-        break;
-      }
-      case Op_CallStaticJava: {
-        strength = STRONG;
-        break;
-      }
+      case Op_CallStaticJava:
       case Op_CallDynamicJava:
       case Op_CallLeaf:
       case Op_CallLeafNoFP:
@@ -3200,6 +3248,9 @@
       case Op_StoreL:
       case Op_StoreLConditional:
       case Op_StoreI:
+      case Op_StoreIConditional:
+      case Op_StoreN:
+      case Op_StoreP:
       case Op_StoreVector:
       case Op_StrInflatedCopy:
       case Op_StrCompressedCopy:
@@ -3207,8 +3258,24 @@
       case Op_CastP2X:
       case Op_SafePoint:
       case Op_EncodeISOArray:
+      case Op_AryEq:
+      case Op_StrEquals:
+      case Op_StrComp:
+      case Op_StrIndexOf:
+      case Op_StrIndexOfChar:
+      case Op_HasNegatives:
+        // Known to require barriers
         strength = STRONG;
         break;
+      case Op_CmpP: {
+        if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
+            n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
+          // One of the sides is known null, no need for barrier.
+        } else {
+          strength = STRONG;
+        }
+        break;
+      }
       case Op_LoadB:
       case Op_LoadUB:
       case Op_LoadUS:
@@ -3226,41 +3293,20 @@
         ciField* field = alias_type->field();
         bool is_static = field != NULL && field->is_static();
         bool is_final = field != NULL && field->is_final();
-        bool is_stable = field != NULL && field->is_stable();
+
         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
-          // Leave strength as is.
-        } else if (ShenandoahOptimizeInstanceFinals && !is_static && is_final) {
-          // Leave strength as is.
-        } else if (ShenandoahOptimizeStableFinals && (is_stable || (adr_type->isa_aryptr() && adr_type->isa_aryptr()->is_stable()))) {
-          // Leave strength as is.
+          // Loading the constant does not require barriers: it should be handled
+          // as part of GC roots already.
         } else {
-          strength = WEAK;
+          strength = STRONG;
         }
         break;
       }
-      case Op_AryEq: {
-        Node* n1 = n->in(2);
-        Node* n2 = n->in(3);
-        if (!ShenandoahOptimizeStableFinals ||
-            !n1->bottom_type()->isa_aryptr() || !n1->bottom_type()->isa_aryptr()->is_stable() ||
-            !n2->bottom_type()->isa_aryptr() || !n2->bottom_type()->isa_aryptr()->is_stable()) {
-          strength = WEAK;
-        }
-        break;
-      }
-      case Op_StrEquals:
-      case Op_StrComp:
-      case Op_StrIndexOf:
-      case Op_StrIndexOfChar:
-        if (!ShenandoahOptimizeStableFinals) {
-           strength = WEAK;
-        }
-        break;
       case Op_Conv2B:
       case Op_LoadRange:
       case Op_LoadKlass:
       case Op_LoadNKlass:
-        // NONE, i.e. leave current strength as is
+        // Do not require barriers
         break;
       case Op_AddP:
       case Op_CheckCastPP:
@@ -3268,26 +3314,19 @@
       case Op_CMoveP:
       case Op_Phi:
       case Op_ShenandoahLoadReferenceBarrier:
+        // Whether or not these need the barriers depends on their users
         visit_users = true;
         break;
       default: {
 #ifdef ASSERT
-        tty->print_cr("Unknown node in get_barrier_strength:");
-        n->dump(1);
-        ShouldNotReachHere();
+        fatal("Unknown node in get_barrier_strength: %s", NodeClassNames[n->Opcode()]);
 #else
+        // Default to strong: better to have excess barriers, rather than miss some.
         strength = STRONG;
 #endif
       }
     }
-#ifdef ASSERT
-/*
-    if (strength == STRONG) {
-      tty->print("strengthening node: ");
-      n->dump();
-    }
-    */
-#endif
+
     stack.pop();
     if (visit_users) {
       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {