hotspot/src/share/vm/opto/memnode.cpp
changeset 19770 7cb9f982ea81
parent 18449 0afc7507c3c1
child 19995 55af95bea4e7
 uint LoadNode::hash() const {
   // unroll addition of interesting fields
   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
 }
 
+static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
+  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
+    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
+    bool is_stable_ary = FoldStableValues &&
+                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
+                         tp->isa_aryptr()->is_stable();
+
+    return (eliminate_boxing && non_volatile) || is_stable_ary;
+  }
+
+  return false;
+}
+
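
The helper added above folds together the two conditions under which can_see_stored_value (next hunk) may look through memory barriers: boxing elimination applied to a non-volatile field, and a load from a stable array (FoldStableValues). A minimal standalone restatement of that predicate, with plain C++ stand-ins for Compile::AliasType and the pointer type (an illustrative sketch, not HotSpot code):

    #include <cstdio>

    // Hypothetical stand-ins for the alias/type queries used by the real helper.
    struct FieldInfo { bool is_field;  bool is_volatile; };
    struct ArrayInfo { bool is_array;  bool is_stable;   };

    static bool skip_through_membars_sketch(bool eliminate_boxing, bool fold_stable_values,
                                            const FieldInfo& field, const ArrayInfo& ary) {
      bool non_volatile  = field.is_field && !field.is_volatile;
      bool is_stable_ary = fold_stable_values && ary.is_array && ary.is_stable;
      return (eliminate_boxing && non_volatile) || is_stable_ary;
    }

    int main() {
      FieldInfo plain_field = { true, false };  // non-volatile instance field
      ArrayInfo stable_ary  = { true, true  };  // array marked stable
      printf("%d\n", skip_through_membars_sketch(true,  true,  plain_field, stable_ary));  // 1
      printf("%d\n", skip_through_membars_sketch(false, false, plain_field, stable_ary));  // 0
      return 0;
    }
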
 //---------------------------can_see_stored_value------------------------------
 // This routine exists to make sure this set of tests is done the same
 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
 // will change the graph shape in a way which makes memory alive twice at the
 // same time (uses the Oracle model of aliasing), then some
[...]
   intptr_t ld_off = 0;
   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
   Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
   // This is more general than load from boxing objects.
-  if (phase->C->eliminate_boxing() && (atp != NULL) &&
-      (atp->index() >= Compile::AliasIdxRaw) &&
-      (atp->field() != NULL) && !atp->field()->is_volatile()) {
+  if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
     uint alias_idx = atp->index();
-    bool final = atp->field()->is_final();
+    bool final = !atp->is_rewritable();
     Node* result = NULL;
     Node* current = st;
     // Skip through chains of MemBarNodes checking the MergeMems for
     // new states for the slice of this load.  Stop once any other
     // kind of node is encountered.  Loads from final memory can skip
[...]
     }
     if (result != NULL) {
       st = result;
     }
   }
-
 
   // Loop around twice in the case Load -> Initialize -> Store.
   // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
   for (int trip = 0; trip <= 1; trip++) {
 
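
The body of the skip loop itself is elided from this hunk; only the comment above describes it. As a rough, assumed sketch of that pattern in plain C++ (not the HotSpot MemBar/MergeMem node classes, and with the per-slice bookkeeping omitted): walk the memory chain upward, treat consecutive barrier nodes as transparent, and stop at the first node of any other kind.

    #include <cstdio>

    enum Kind { BARRIER, STORE, OTHER };

    // A miniature memory chain: each state remembers the one before it.
    struct MemState {
      Kind      kind;
      MemState* prev;
    };

    // Step over consecutive barriers; return the first non-barrier state,
    // or NULL if the chain runs out first.
    static MemState* skip_barriers(MemState* current) {
      while (current != NULL && current->kind == BARRIER) {
        current = current->prev;
      }
      return current;
    }

    int main() {
      MemState store = { STORE,   NULL   };
      MemState mb1   = { BARRIER, &store };
      MemState mb2   = { BARRIER, &mb1   };
      MemState* found = skip_barriers(&mb2);
      printf("stopped on a %s\n", (found != NULL && found->kind == STORE) ? "store" : "non-store");
      return 0;
    }
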
[...]
 
   // No match.
   return NULL;
 }
 
+// Try to constant-fold a stable array element.
+static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
+  assert(ary->is_stable(), "array should be stable");
+
+  if (ary->const_oop() != NULL) {
+    // Decode the results of GraphKit::array_element_address.
+    ciArray* aobj = ary->const_oop()->as_array();
+    ciConstant con = aobj->element_value_by_offset(off);
+
+    if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+      const Type* con_type = Type::make_from_constant(con);
+      if (con_type != NULL) {
+        if (con_type->isa_aryptr()) {
+          // Join with the array element type, in case it is also stable.
+          int dim = ary->stable_dimension();
+          con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
+        }
+        if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
+          con_type = con_type->make_narrowoop();
+        }
+#ifndef PRODUCT
+        if (TraceIterativeGVN) {
+          tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
+          con_type->dump(); tty->cr();
+        }
+#endif //PRODUCT
+        return con_type;
+      }
+    }
+  }
+
+  return NULL;
+}
+
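
The key rule in the helper above is the con.is_null_or_zero() check: a stable array element is folded to a constant only once it holds a non-default value, because 0/null may still mean "not yet initialized" and the load must then survive. A minimal standalone model of that rule (illustrative only; ciArray, ciConstant and the C2 type system are replaced by plain C++ here):

    #include <cstdio>
    #include <vector>

    // Returns true and sets *out when the element may be treated as a compile-time
    // constant; returns false when the load has to stay (out of range or default value).
    static bool fold_stable_elem(const std::vector<int>& stable_ary, size_t idx, int* out) {
      if (idx >= stable_ary.size()) return false;   // unknown element: keep the load
      int v = stable_ary[idx];
      if (v == 0) return false;                     // default value: not constant yet
      *out = v;                                     // non-default value: safe to fold
      return true;
    }

    int main() {
      std::vector<int> stable_ary = { 0, 42, 7 };   // element 0 not yet initialized
      for (size_t i = 0; i < stable_ary.size(); i++) {
        int c = 0;
        if (fold_stable_elem(stable_ary, i, &c)) {
          printf("elem[%zu]: fold to constant %d\n", i, c);
        } else {
          printf("elem[%zu]: keep the load\n", i);
        }
      }
      return 0;
    }
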
 //------------------------------Value-----------------------------------------
 const Type *LoadNode::Value( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
   Node* mem = in(MemNode::Memory);
   const Type *t1 = phase->type(mem);
[...]
   int off = tp->offset();
   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
   Compile* C = phase->C;
 
   // Try to guess loaded type from pointer type
-  if (tp->base() == Type::AryPtr) {
-    const Type *t = tp->is_aryptr()->elem();
+  if (tp->isa_aryptr()) {
+    const TypeAryPtr* ary = tp->is_aryptr();
+    const Type *t = ary->elem();
+
+    // Determine whether the reference is beyond the header or not, by comparing
+    // the offset against the offset of the start of the array's data.
+    // Different array types begin at slightly different offsets (12 vs. 16).
+    // We choose T_BYTE as an example base type that is least restrictive
+    // as to alignment, which will therefore produce the smallest
+    // possible base offset.
+    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
+
+    // Try to constant-fold a stable array element.
+    if (FoldStableValues && ary->is_stable()) {
+      // Make sure the reference is not into the header
+      if (off_beyond_header && off != Type::OffsetBot) {
+        assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
+        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
+        if (con_type != NULL) {
+          return con_type;
+        }
+      }
+    }
+
     // Don't do this for integer types. There is only potential profit if
     // the element type t is lower than _type; that is, for int types, if _type is
     // more restrictive than t.  This only happens here if one is short and the other
     // char (both 16 bits), and in those cases we've made an intentional decision
     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
[...]
     if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
         && (_type->isa_vect() == NULL)
         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
       // t might actually be lower than _type, if _type is a unique
       // concrete subclass of abstract class t.
-      // Make sure the reference is not into the header, by comparing
-      // the offset against the offset of the start of the array's data.
-      // Different array types begin at slightly different offsets (12 vs. 16).
-      // We choose T_BYTE as an example base type that is least restrictive
-      // as to alignment, which will therefore produce the smallest
-      // possible base offset.
-      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
-      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
+      if (off_beyond_header) {  // is the offset beyond the header?
         const Type* jt = t->join(_type);
         // In any case, do not allow the join, per se, to empty out the type.
         if (jt->empty() && !t->empty()) {
           // This can happen if a interface-typed array narrows to a class type.
           jt = _type;