hotspot/src/share/vm/opto/memnode.cpp
changeset 30629 b6e5ad2f18d5
parent 30300 4b12a5b40064
child 31035 0f0743952c41
comparison of 30628:3c15b4a3bf4d and 30629:b6e5ad2f18d5 (unchanged context lines keep their new-revision line numbers; inserted lines are marked "+", deleted lines "-")
    26 #include "classfile/systemDictionary.hpp"
    27 #include "compiler/compileLog.hpp"
    28 #include "memory/allocation.inline.hpp"
    29 #include "oops/objArrayKlass.hpp"
    30 #include "opto/addnode.hpp"
+   31 #include "opto/arraycopynode.hpp"
    32 #include "opto/cfgnode.hpp"
    33 #include "opto/compile.hpp"
    34 #include "opto/connode.hpp"
    35 #include "opto/convertnode.hpp"
    36 #include "opto/loopnode.hpp"
   106 
   107 extern void print_alias_types();
   108 
   109 #endif
   110 
+  111 static bool membar_for_arraycopy_helper(const TypeOopPtr *t_oop, MergeMemNode* mm, PhaseTransform *phase) {
+  112   if (mm->memory_at(Compile::AliasIdxRaw)->is_Proj()) {
+  113     Node* n = mm->memory_at(Compile::AliasIdxRaw)->in(0);
+  114     if ((n->is_ArrayCopy() && n->as_ArrayCopy()->may_modify(t_oop, phase)) ||
+  115         (n->is_CallLeaf() && n->as_CallLeaf()->may_modify(t_oop, phase))) {
+  116       return true;
+  117     }
+  118   }
+  119   return false;
+  120 }
+  121 
+  122 static bool membar_for_arraycopy(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase) {
+  123   Node* mem = mb->in(TypeFunc::Memory);
+  124   if (mem->is_MergeMem()) {
+  125     return membar_for_arraycopy_helper(t_oop, mem->as_MergeMem(), phase);
+  126   } else if (mem->is_Phi()) {
+  127     // after macro expansion of an ArrayCopyNode we may have a Phi
+  128     for (uint i = 1; i < mem->req(); i++) {
+  129       if (mem->in(i) != NULL && mem->in(i)->is_MergeMem() && membar_for_arraycopy_helper(t_oop, mem->in(i)->as_MergeMem(), phase)) {
+  130         return true;
+  131       }
+  132     }
+  133   }
+  134   return false;
+  135 }
+  136 
   137 Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
   138   assert((t_oop != NULL), "sanity");
   139   bool is_instance = t_oop->is_known_instance_field();
   140   bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
   141                              (load != NULL) && load->is_Load() &&
   154     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
   155       Node *proj_in = result->in(0);
   156       if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
   157         break;  // hit one of our sentinels
   158       } else if (proj_in->is_Call()) {
+  159         // ArrayCopyNodes processed here as well
   160         CallNode *call = proj_in->as_Call();
   161         if (!call->may_modify(t_oop, phase)) { // returns false for instances
   162           result = call->in(TypeFunc::Memory);
   163         }
   164       } else if (proj_in->is_Initialize()) {
   165         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
   166         // Stop if this is the initialization for the object instance which
-  139         // which contains this memory slice, otherwise skip over it.
+  167         // contains this memory slice, otherwise skip over it.
   168         if ((alloc == NULL) || (alloc->_idx == instance_id)) {
   169           break;
   170         }
   171         if (is_instance) {
   172           result = proj_in->in(TypeFunc::Memory);
   176           if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
   177             result = proj_in->in(TypeFunc::Memory); // not related allocation
   178           }
   179         }
   180       } else if (proj_in->is_MemBar()) {
+  181         if (membar_for_arraycopy(t_oop, proj_in->as_MemBar(), phase)) {
+  182           break;
+  183         }
   184         result = proj_in->in(TypeFunc::Memory);
   185       } else {
   186         assert(false, "unexpected projection");
   187       }
   188     } else if (result->is_ClearArray()) {
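Illustrative note (not part of the changeset): the hunk above, together with the new membar_for_arraycopy() helpers, walks a known instance's memory chain backwards, skipping producers that provably cannot modify the field and stopping either at the allocation/initialization that created the instance (the sentinel) or at a membar that publishes an arraycopy which may modify it. A minimal standalone C++ sketch of that control flow, with toy types in place of C2 nodes (ToyMem and friends are my names, not HotSpot's):

// Toy model only: node kinds become an enum, may_modify() and instance ids
// become plain fields; the real code consults C2 types and alias information.
#include <cassert>

enum class Kind { Store, Call, Initialize, MemBar, Start };

struct ToyMem {
  Kind    kind;
  ToyMem* prev;            // stands in for in(TypeFunc::Memory)
  int     alloc_id;        // for Initialize: which allocation it initializes
  bool    may_modify;      // for Call/MemBar: could it write this field?
};

static ToyMem* toy_optimize_chain(ToyMem* mem, int instance_id) {
  for (int budget = 50; budget > 0 && mem != nullptr; budget--) {   // cycle limiter
    switch (mem->kind) {
      case Kind::Initialize:
        if (mem->alloc_id == instance_id) return mem;               // hit our sentinel
        mem = mem->prev;                                            // unrelated init: skip over it
        break;
      case Kind::Call:
      case Kind::MemBar:
        if (mem->may_modify) return mem;                            // may write the field: stop
        mem = mem->prev;                                            // independent: skip over it
        break;
      default:
        return mem;                                                 // store/start of graph: stop here
    }
  }
  return mem;
}

int main() {
  ToyMem start{Kind::Start, nullptr, 0, false};
  ToyMem init {Kind::Initialize, &start, /*alloc_id*/ 7, false};
  ToyMem call {Kind::Call, &init, 0, /*may_modify*/ false};
  ToyMem mb   {Kind::MemBar, &call, 0, /*may_modify*/ false};
  // Walking from the membar for instance 7 skips the harmless membar and call
  // and stops at the instance's own initialization.
  assert(toy_optimize_chain(&mb, 7) == &init);
  return 0;
}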
   506   }
   507   return false;
   508 }
   509 
   510 
+  511 // Find an arraycopy that must have set (can_see_stored_value=true) or
+  512 // could have set (can_see_stored_value=false) the value for this load
+  513 Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const {
+  514   if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
+  515                                                mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
+  516     Node* mb = mem->in(0);
+  517     if (mb->in(0) != NULL && mb->in(0)->is_Proj() &&
+  518         mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) {
+  519       ArrayCopyNode* ac = mb->in(0)->in(0)->as_ArrayCopy();
+  520       if (ac->is_clonebasic()) {
+  521         intptr_t offset;
+  522         AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase, offset);
+  523         assert(alloc != NULL && alloc->initialization()->is_complete_with_arraycopy(), "broken allocation");
+  524         if (alloc == ld_alloc) {
+  525           return ac;
+  526         }
+  527       }
+  528     }
+  529   } else if (mem->is_Proj() && mem->in(0) != NULL && mem->in(0)->is_ArrayCopy()) {
+  530     ArrayCopyNode* ac = mem->in(0)->as_ArrayCopy();
+  531 
+  532     if (ac->is_arraycopy_validated() ||
+  533         ac->is_copyof_validated() ||
+  534         ac->is_copyofrange_validated()) {
+  535       Node* ld_addp = in(MemNode::Address);
+  536       if (ld_addp->is_AddP()) {
+  537         Node* ld_base = ld_addp->in(AddPNode::Address);
+  538         Node* ld_offs = ld_addp->in(AddPNode::Offset);
+  539 
+  540         Node* dest = ac->in(ArrayCopyNode::Dest);
+  541 
+  542         if (dest == ld_base) {
+  543           Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
+  544           Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
+  545           Node* len = ac->in(ArrayCopyNode::Length);
+  546 
+  547           const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
+  548           const TypeX *ld_offs_t = phase->type(ld_offs)->isa_intptr_t();
+  549           const TypeInt *len_t = phase->type(len)->isa_int();
+  550           const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();
+  551 
+  552           if (dest_pos_t != NULL && ld_offs_t != NULL && len_t != NULL && ary_t != NULL) {
+  553             BasicType ary_elem  = ary_t->klass()->as_array_klass()->element_type()->basic_type();
+  554             uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
+  555             uint elemsize = type2aelembytes(ary_elem);
+  556 
+  557             intptr_t dest_pos_plus_len_lo = (((intptr_t)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
+  558             intptr_t dest_pos_plus_len_hi = (((intptr_t)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
+  559             intptr_t dest_pos_lo = ((intptr_t)dest_pos_t->_lo) * elemsize + header;
+  560             intptr_t dest_pos_hi = ((intptr_t)dest_pos_t->_hi) * elemsize + header;
+  561 
+  562             if (can_see_stored_value) {
+  563               if (ld_offs_t->_lo >= dest_pos_hi && ld_offs_t->_hi < dest_pos_plus_len_lo) {
+  564                 return ac;
+  565               }
+  566             } else {
+  567               if (ld_offs_t->_hi < dest_pos_lo || ld_offs_t->_lo >= dest_pos_plus_len_hi) {
+  568                 mem = ac->in(TypeFunc::Memory);
+  569               }
+  570               return ac;
+  571             }
+  572           }
+  573         }
+  574       }
+  575     }
+  576   }
+  577   return NULL;
+  578 }
+  579 
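Illustrative note (not part of the changeset): the validated-arraycopy branch above turns the type ranges of DestPos, Length and the load offset into byte intervals (header + index * element size) and compares them, either to prove the load reads only copied elements (can_see_stored_value) or to prove it reads none of them. A standalone sketch of that interval arithmetic with C2 types replaced by plain [lo, hi] ranges (the Range struct and the 16-byte header used in main are illustrative assumptions, not taken from the file):

#include <cassert>
#include <cstdint>

struct Range { int64_t lo, hi; };   // inclusive bounds, as in TypeInt/TypeX

// true if the load's byte offset certainly falls inside [dest_pos, dest_pos+len)
// for every possible dest_pos/len -- the can_see_stored_value == true test.
static bool load_must_be_inside_copy(Range ld_offs, Range dest_pos, Range len,
                                     int64_t elemsize, int64_t header) {
  int64_t dest_lo_byte     = dest_pos.hi * elemsize + header;             // worst-case start of the copy
  int64_t dest_end_lo_byte = (dest_pos.lo + len.lo) * elemsize + header;  // worst-case end of the copy
  return ld_offs.lo >= dest_lo_byte && ld_offs.hi < dest_end_lo_byte;
}

// true if the load's byte offset certainly falls outside the copied region
// for every possible dest_pos/len -- the test used to step past the arraycopy.
static bool load_must_be_outside_copy(Range ld_offs, Range dest_pos, Range len,
                                      int64_t elemsize, int64_t header) {
  int64_t dest_lo_byte     = dest_pos.lo * elemsize + header;             // best-case start of the copy
  int64_t dest_end_hi_byte = (dest_pos.hi + len.hi) * elemsize + header;  // best-case end of the copy
  return ld_offs.hi < dest_lo_byte || ld_offs.lo >= dest_end_hi_byte;
}

int main() {
  // int[] copy: 4-byte elements, 16-byte base offset (illustrative layout only).
  Range dest_pos = {2, 2}, len = {8, 8};
  Range ld_inside  = {16 + 4 * 4, 16 + 4 * 4};   // element 4: inside [2, 10)
  Range ld_outside = {16 + 0 * 4, 16 + 0 * 4};   // element 0: before the copied range
  assert(load_must_be_inside_copy(ld_inside, dest_pos, len, 4, 16));
  assert(load_must_be_outside_copy(ld_outside, dest_pos, len, 4, 16));
  return 0;
}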
   580 // The logic for reordering loads and stores uses four steps:
   581 // (a) Walk carefully past stores and initializations which we
   582 //     can prove are independent of this load.
   583 // (b) Observe that the next memory state makes an exact match
   584 //     with self (load or store), and locate the relevant store.
   608 
   609   int cnt = 50;             // Cycle limiter
   610   for (;;) {                // While we can dance past unrelated stores...
   611     if (--cnt < 0)  break;  // Caught in cycle or a complicated dance?
   612 
+  613     Node* prev = mem;
   614     if (mem->is_Store()) {
   615       Node* st_adr = mem->in(MemNode::Address);
   616       intptr_t st_offset = 0;
   617       Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
   618       if (st_base == NULL)
   679       if (known_identical) {
   680         // From caller, can_see_stored_value will consult find_captured_store.
   681         return mem;         // let caller handle steps (c), (d)
   682       }
   683 
+  684     } else if (find_previous_arraycopy(phase, alloc, mem, false) != NULL) {
+  685       if (prev != mem) {
+  686         // Found an arraycopy but it doesn't affect that load
+  687         continue;
+  688       }
+  689       // Found an arraycopy that may affect that load
+  690       return mem;
   691     } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
   692       // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
   693       if (mem->is_Proj() && mem->in(0)->is_Call()) {
+  694         // ArrayCopyNodes processed here as well.
   695         CallNode *call = mem->in(0)->as_Call();
   696         if (!call->may_modify(addr_t, phase)) {
   697           mem = call->in(TypeFunc::Memory);
   698           continue;         // (a) advance through independent call memory
   699         }
   700       } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
+  701         if (membar_for_arraycopy(addr_t, mem->in(0)->as_MemBar(), phase)) {
+  702           break;
+  703         }
   704         mem = mem->in(0)->in(TypeFunc::Memory);
   705         continue;           // (a) advance through independent MemBar memory
   706       } else if (mem->is_ClearArray()) {
   707         if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
   708           // (the call updated 'mem' value)
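Illustrative note (not part of the changeset): the new find_previous_arraycopy() arm above relies on the helper's in/out mem argument: when the arraycopy provably cannot overlap the load, mem is advanced past it and the walk continues (prev != mem); when it might overlap, mem is left in place and the walk stops at the copy. A toy sketch of that contract (ToyMem and the helper names are mine, not HotSpot API; other kinds of memory states are handled by the other arms of the real loop):

struct ToyMem { ToyMem* prev; bool overlaps_load; bool is_copy; };

// Stands in for find_previous_arraycopy(..., can_see_stored_value=false): returns
// true when an arraycopy feeds this memory state, and advances 'mem' past it only
// when the copy provably cannot overlap the load.
static bool advance_past_copy(ToyMem*& mem) {
  if (!mem->is_copy) return false;
  if (!mem->overlaps_load) mem = mem->prev;   // disjoint copy: step over it
  return true;
}

// One iteration of the walk: false means "keep searching from the new mem",
// true means "stop: this memory state may produce the loaded value".
static bool must_stop_at(ToyMem*& mem) {
  ToyMem* prev = mem;
  if (advance_past_copy(mem) && prev != mem) {
    return false;                             // found a copy, but it cannot affect the load
  }
  return true;
}

int main() {
  ToyMem older{nullptr, false, false};
  ToyMem disjoint_copy{&older, /*overlaps_load*/ false, /*is_copy*/ true};
  ToyMem touching_copy{&older, /*overlaps_load*/ true,  /*is_copy*/ true};

  ToyMem* mem = &disjoint_copy;
  if (must_stop_at(mem) || mem != &older) return 1;            // expected: stepped past, keep searching
  mem = &touching_copy;
  if (!must_stop_at(mem) || mem != &touching_copy) return 1;   // expected: stop at the overlapping copy
  return 0;
}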
   869     return (eliminate_boxing && non_volatile) || is_stable_ary;
   870   }
   871 
   872   return false;
   873 }
+  874 
+  875 // Is the value loaded previously stored by an arraycopy? If so return
+  876 // a load node that reads from the source array so we may be able to
+  877 // optimize out the ArrayCopy node later.
+  878 Node* MemNode::can_see_arraycopy_value(Node* st, PhaseTransform* phase) const {
+  879   Node* ld_adr = in(MemNode::Address);
+  880   intptr_t ld_off = 0;
+  881   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
+  882   Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
+  883   if (ac != NULL) {
+  884     assert(ac->is_ArrayCopy(), "what kind of node can this be?");
+  885     assert(is_Load(), "only for loads");
+  886 
+  887     if (ac->as_ArrayCopy()->is_clonebasic()) {
+  888       assert(ld_alloc != NULL, "need an alloc");
+  889       Node* ld = clone();
+  890       Node* addp = in(MemNode::Address)->clone();
+  891       assert(addp->is_AddP(), "address must be addp");
+  892       assert(addp->in(AddPNode::Base) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Base), "strange pattern");
+  893       assert(addp->in(AddPNode::Address) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Address), "strange pattern");
+  894       addp->set_req(AddPNode::Base, ac->in(ArrayCopyNode::Src)->in(AddPNode::Base));
+  895       addp->set_req(AddPNode::Address, ac->in(ArrayCopyNode::Src)->in(AddPNode::Address));
+  896       ld->set_req(MemNode::Address, phase->transform(addp));
+  897       if (in(0) != NULL) {
+  898         assert(ld_alloc->in(0) != NULL, "alloc must have control");
+  899         ld->set_req(0, ld_alloc->in(0));
+  900       }
+  901       return ld;
+  902     } else {
+  903       Node* ld = clone();
+  904       Node* addp = in(MemNode::Address)->clone();
+  905       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
+  906       addp->set_req(AddPNode::Base, ac->in(ArrayCopyNode::Src));
+  907       addp->set_req(AddPNode::Address, ac->in(ArrayCopyNode::Src));
+  908 
+  909       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
+  910       BasicType ary_elem  = ary_t->klass()->as_array_klass()->element_type()->basic_type();
+  911       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
+  912       uint shift  = exact_log2(type2aelembytes(ary_elem));
+  913 
+  914       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
+  915 #ifdef _LP64
+  916       diff = phase->transform(new ConvI2LNode(diff));
+  917 #endif
+  918       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
+  919 
+  920       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
+  921       addp->set_req(AddPNode::Offset, offset);
+  922       ld->set_req(MemNode::Address, phase->transform(addp));
+  923 
+  924       if (in(0) != NULL) {
+  925         assert(ac->in(0) != NULL, "alloc must have control");
+  926         ld->set_req(0, ac->in(0));
+  927       }
+  928       return ld;
+  929     }
+  930   }
+  931   return NULL;
+  932 }
+  933 
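Illustrative note (not part of the changeset): in the validated-copy case above, the cloned load is retargeted from dest to src by adding (SrcPos - DestPos) << log2(element size) to the address offset, which is what the SubI/ConvI2L/LShiftX/AddX chain builds. A standalone sketch of that arithmetic in plain integers (the 16-byte base offset used in main is an illustrative assumption):

#include <cstdint>

static int64_t retargeted_offset(int64_t ld_offset_in_dest,  // byte offset of the load within dest
                                 int32_t src_pos,            // ArrayCopyNode::SrcPos
                                 int32_t dest_pos,           // ArrayCopyNode::DestPos
                                 unsigned shift) {           // log2(element size in bytes)
  // diff = (src_pos - dest_pos) << shift, i.e. the byte distance between the
  // element copied from and the element it was copied to (non-negative here).
  int64_t diff = (int64_t)(src_pos - dest_pos) << shift;
  return ld_offset_in_dest + diff;
}

int main() {
  // long[] copy (8-byte elements, shift == 3) from src[5..] to dest[2..]:
  // a load of dest[4] reads the value that was stored from src[7].
  const int64_t header = 16;                 // illustrative array base offset
  int64_t off_dest4 = header + 4 * 8;        // byte offset of dest[4]
  int64_t off_src7  = header + 7 * 8;        // byte offset of src[7]
  return retargeted_offset(off_dest4, /*src_pos*/ 5, /*dest_pos*/ 2, /*shift*/ 3) == off_src7 ? 0 : 1;
}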
   934 
   935 //---------------------------can_see_stored_value------------------------------
   936 // This routine exists to make sure this set of tests is done the same
   937 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
   938 // will change the graph shape in a way which makes memory alive twice at the
   963                      opc == Op_MemBarAcquireLock ||
   964                      opc == Op_LoadFence)) ||
   965           opc == Op_MemBarRelease ||
   966           opc == Op_StoreFence ||
   967           opc == Op_MemBarReleaseLock ||
+  968           opc == Op_MemBarStoreStore ||
   969           opc == Op_MemBarCPUOrder) {
   970         Node* mem = current->in(0)->in(TypeFunc::Memory);
   971         if (mem->is_MergeMem()) {
   972           MergeMemNode* merge = mem->as_MergeMem();
   973           Node* new_st = merge->memory_at(alias_idx);
  1034       InitializeNode* init = st->in(0)->as_Initialize();
  1035       AllocateNode* alloc = init->allocation();
  1036       if ((alloc != NULL) && (alloc == ld_alloc)) {
  1037         // examine a captured store value
  1038         st = init->find_captured_store(ld_off, memory_size(), phase);
-  866         if (st != NULL)
+ 1039         if (st != NULL) {
  1040           continue;             // take one more trip around
+ 1041         }
  1042       }
  1043     }
  1044 
  1045     // Load boxed value from result of valueOf() call is input parameter.
  1046     if (this->is_Load() && ld_adr->is_AddP() &&
  1507         if (result != NULL) return result;
  1508       }
  1509     }
  1510   }
  1511 
+ 1512   // Is there a dominating load that loads the same value?  Leave
+ 1513   // anything that is not a load of a field/array element (like
+ 1514   // barriers etc.) alone
+ 1515   if (in(0) != NULL && adr_type() != TypeRawPtr::BOTTOM && can_reshape) {
+ 1516     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
+ 1517       Node *use = mem->fast_out(i);
+ 1518       if (use != this &&
+ 1519           use->Opcode() == Opcode() &&
+ 1520           use->in(0) != NULL &&
+ 1521           use->in(0) != in(0) &&
+ 1522           use->in(Address) == in(Address)) {
+ 1523         Node* ctl = in(0);
+ 1524         for (int i = 0; i < 10 && ctl != NULL; i++) {
+ 1525           ctl = IfNode::up_one_dom(ctl);
+ 1526           if (ctl == use->in(0)) {
+ 1527             set_req(0, use->in(0));
+ 1528             return this;
+ 1529           }
+ 1530         }
+ 1531       }
+ 1532     }
+ 1533   }
+ 1534 
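Illustrative note (not part of the changeset): the new block above gives a load the control of an identical dominating load by hopping up at most ten dominators from its own control via IfNode::up_one_dom(). A toy sketch of that bounded walk, with the dominator step reduced to a plain idom pointer (the Ctl struct is mine, not HotSpot API); the real code additionally requires the candidate to be a load of the same opcode at the same address hanging off the same memory state:

#include <cassert>

struct Ctl { Ctl* idom; };   // immediate dominator, nullptr at the root

// Returns true if 'other' dominates 'self' within 'max_hops' steps, i.e. the case
// in which the load's control can be widened to the dominating load's control.
static bool dominated_within(Ctl* self, Ctl* other, int max_hops = 10) {
  Ctl* ctl = self;
  for (int i = 0; i < max_hops && ctl != nullptr; i++) {
    ctl = ctl->idom;        // one step up, like IfNode::up_one_dom(ctl)
    if (ctl == other) {
      return true;
    }
  }
  return false;             // give up: unrelated control or too far away
}

int main() {
  Ctl root{nullptr}, a{&root}, b{&a}, c{&b};
  assert(dominated_within(&c, &a));      // a dominates c, two hops away
  assert(!dominated_within(&a, &c));     // c does not dominate a
  return 0;
}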
  1535   // Check for prior store with a different base or offset; make Load
  1536   // independent.  Skip through any number of them.  Bail out if the stores
  1537   // are in an endless dead cycle and report no progress.  This is a key
  1538   // transform for Reflection.  However, if after skipping through the Stores
  1539   // we can't then fold up against a prior store do NOT do the transform as
  1543   // anti-dependence work knows how to bypass.  I.e. we need all
  1544   // anti-dependence checks to ask the same Oracle.  Right now, that Oracle is
  1545   // the alias index stuff.  So instead, peek through Stores and IFF we can
  1546   // fold up, do so.
  1547   Node* prev_mem = find_previous_store(phase);
+ 1548   if (prev_mem != NULL) {
+ 1549     Node* value = can_see_arraycopy_value(prev_mem, phase);
+ 1550     if (value != NULL) {
+ 1551       return value;
+ 1552     }
+ 1553   }
  1554   // Steps (a), (b):  Walk past independent stores to find an exact match.
  1555   if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) {
  1556     // (c) See if we can fold up on the spot, but don't fold up here.
  1557     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
  1558     // just return a prior value, which is done by Identity calls.
  2730   init_req(ExpectedIn, ex );
  2731 }
  2732 
  2733 //=============================================================================
  2734 //-------------------------------adr_type--------------------------------------
- 2532 // Do we Match on this edge index or not?  Do not match memory
  2735 const TypePtr* ClearArrayNode::adr_type() const {
  2736   Node *adr = in(3);
  2737   if (adr == NULL)  return NULL; // node is dead
  2738   return MemNode::calculate_adr_type(adr->bottom_type());
  2739 }