hotspot/src/share/vm/opto/memnode.cpp
changeset 17383 3665c0901a0d
parent 15875 638b3e8fbe5e
child 18105 dc31f0146a53
child 18073 f02460441ddc
17382:bba473b81ec0 17383:3665c0901a0d
@@ -101,15 +101,19 @@
 
 extern void print_alias_types();
 
 #endif
 
-Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
-  const TypeOopPtr *tinst = t_adr->isa_oopptr();
-  if (tinst == NULL || !tinst->is_known_instance_field())
+Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
+  assert((t_oop != NULL), "sanity");
+  bool is_instance = t_oop->is_known_instance_field();
+  bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
+                             (load != NULL) && load->is_Load() &&
+                             (phase->is_IterGVN() != NULL);
+  if (!(is_instance || is_boxed_value_load))
     return mchain;  // don't try to optimize non-instance types
-  uint instance_id = tinst->instance_id();
+  uint instance_id = t_oop->instance_id();
   Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
   Node *prev = NULL;
   Node *result = mchain;
   while (prev != result) {
     prev = result;
@@ -120,33 +124,42 @@
       Node *proj_in = result->in(0);
       if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
         break;  // hit one of our sentinels
       } else if (proj_in->is_Call()) {
         CallNode *call = proj_in->as_Call();
-        if (!call->may_modify(t_adr, phase)) {
+        if (!call->may_modify(t_oop, phase)) { // returns false for instances
           result = call->in(TypeFunc::Memory);
         }
       } else if (proj_in->is_Initialize()) {
         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
         // Stop if this is the initialization for the object instance
         // which contains this memory slice, otherwise skip over it.
-        if (alloc != NULL && alloc->_idx != instance_id) {
+        if ((alloc == NULL) || (alloc->_idx == instance_id)) {
+          break;
+        }
+        if (is_instance) {
           result = proj_in->in(TypeFunc::Memory);
+        } else if (is_boxed_value_load) {
+          Node* klass = alloc->in(AllocateNode::KlassNode);
+          const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
+          if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
+            result = proj_in->in(TypeFunc::Memory); // not related allocation
+          }
         }
       } else if (proj_in->is_MemBar()) {
         result = proj_in->in(TypeFunc::Memory);
       } else {
         assert(false, "unexpected projection");
       }
     } else if (result->is_ClearArray()) {
-      if (!ClearArrayNode::step_through(&result, instance_id, phase)) {
+      if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
         // Cannot bypass initialization of the instance
         // we are looking for.
         break;
       }
       // Otherwise skip it (the call updated 'result' value).
     } else if (result->is_MergeMem()) {
-      result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
+      result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
     }
   }
   return result;
 }
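For context on the new is_boxed_value_load path: under the boxing-elimination optimization this changeset extends, a load of a boxed value may walk the memory chain past allocations whose exact klass differs from the box type. A Java shape that benefits looks roughly like the sketch below (illustrative only; the class and method names are invented here, not taken from this changeset):

    public class BoxChainWalk {
        // An unrelated allocation sits between the boxing call and the later
        // read of the boxed value; the optimizer can now step past it on the
        // memory chain and still see the valueOf() result.
        static int readBoxed(int x) {
            Integer box = Integer.valueOf(x);       // boxing allocation or cache hit
            StringBuilder sb = new StringBuilder(); // unrelated allocation
            sb.append('!');
            return box.intValue();                  // load of the boxed value
        }

        public static void main(String[] args) {
            int sum = 0;
            for (int i = 0; i < 1_000_000; i++) {   // warm up so the JIT compiles readBoxed
                sum += readBoxed(i);
            }
            System.out.println(sum);
        }
    }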
@@ -153,12 +166,13 @@
 
-Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
-  const TypeOopPtr *t_oop = t_adr->isa_oopptr();
-  bool is_instance = (t_oop != NULL) && t_oop->is_known_instance_field();
+Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
+  const TypeOopPtr* t_oop = t_adr->isa_oopptr();
+  if (t_oop == NULL)
+    return mchain;  // don't try to optimize non-oop types
+  Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
+  bool is_instance = t_oop->is_known_instance_field();
   PhaseIterGVN *igvn = phase->is_IterGVN();
-  Node *result = mchain;
-  result = optimize_simple_memory_chain(result, t_adr, phase);
   if (is_instance && igvn != NULL  && result->is_Phi()) {
     PhiNode *mphi = result->as_Phi();
     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
     const TypePtr *t = mphi->adr_type();
     if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
@@ -381,11 +395,11 @@
 
   // Currently 'sub' is either Allocate, Initialize or Start nodes.
   // Or Region for the check in LoadNode::Ideal();
   // 'sub' should have sub->in(0) != NULL.
   assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
-         sub->is_Region(), "expecting only these nodes");
+         sub->is_Region() || sub->is_Call(), "expecting only these nodes");
 
   // Get control edge of 'sub'.
   Node* orig_sub = sub;
   sub = sub->find_exact_control(sub->in(0));
   if (sub == NULL || sub->is_top())
@@ -955,15 +969,18 @@
 // same time (uses the Oracle model of aliasing), then some
 // LoadXNode::Identity will fold things back to the equivalence-class model
 // of aliasing.
 Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
   Node* ld_adr = in(MemNode::Address);
-
+  intptr_t ld_off = 0;
+  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
-  Compile::AliasType* atp = tp != NULL ? phase->C->alias_type(tp) : NULL;
-  if (EliminateAutoBox && atp != NULL && atp->index() >= Compile::AliasIdxRaw &&
-      atp->field() != NULL && !atp->field()->is_volatile()) {
+  Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
+  // This is more general than a load from boxing objects.
+  if (phase->C->eliminate_boxing() && (atp != NULL) &&
+      (atp->index() >= Compile::AliasIdxRaw) &&
+      (atp->field() != NULL) && !atp->field()->is_volatile()) {
     uint alias_idx = atp->index();
     bool final = atp->field()->is_final();
     Node* result = NULL;
     Node* current = st;
     // Skip through chains of MemBarNodes checking the MergeMems for
@@ -981,11 +998,11 @@
         if (mem->is_MergeMem()) {
           MergeMemNode* merge = mem->as_MergeMem();
           Node* new_st = merge->memory_at(alias_idx);
           if (new_st == merge->base_memory()) {
             // Keep searching
-            current = merge->base_memory();
+            current = new_st;
             continue;
           }
           // Save the new memory state for the slice and fall through
           // to exit.
           result = new_st;
@@ -1008,13 +1025,11 @@
       if (!phase->eqv(st_adr, ld_adr)) {
         // Try harder before giving up...  Match raw and non-raw pointers.
         intptr_t st_off = 0;
         AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
         if (alloc == NULL)       return NULL;
-        intptr_t ld_off = 0;
-        AllocateNode* allo2 = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
-        if (alloc != allo2)      return NULL;
+        if (alloc != ld_alloc)   return NULL;
         if (ld_off != st_off)    return NULL;
         // At this point we have proven something like this setup:
         //  A = Allocate(...)
         //  L = LoadQ(,  AddP(CastPP(, A.Parm),, #Off))
         //  S = StoreQ(, AddP(,        A.Parm  , #Off), V)
@@ -1027,18 +1042,16 @@
       if (store_Opcode() != st->Opcode())
         return NULL;
       return st->in(MemNode::ValueIn);
     }
 
-    intptr_t offset = 0;  // scratch
-
     // A load from a freshly-created object always returns zero.
     // (This can happen after LoadNode::Ideal resets the load's memory input
     // to find_captured_store, which returned InitializeNode::zero_memory.)
     if (st->is_Proj() && st->in(0)->is_Allocate() &&
-        st->in(0) == AllocateNode::Ideal_allocation(ld_adr, phase, offset) &&
-        offset >= st->in(0)->as_Allocate()->minimum_header_size()) {
+        (st->in(0) == ld_alloc) &&
+        (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
       // return a zero value for the load's basic type
       // (This is one of the few places where a generic PhaseTransform
       // can create new nodes.  Think of it as lazily manifesting
       // virtually pre-existing constants.)
       return phase->zerocon(memory_type());
@@ -1046,19 +1059,31 @@
 
     // A load from an initialization barrier can match a captured store.
     if (st->is_Proj() && st->in(0)->is_Initialize()) {
       InitializeNode* init = st->in(0)->as_Initialize();
       AllocateNode* alloc = init->allocation();
-      if (alloc != NULL &&
-          alloc == AllocateNode::Ideal_allocation(ld_adr, phase, offset)) {
+      if ((alloc != NULL) && (alloc == ld_alloc)) {
         // examine a captured store value
-        st = init->find_captured_store(offset, memory_size(), phase);
+        st = init->find_captured_store(ld_off, memory_size(), phase);
         if (st != NULL)
           continue;             // take one more trip around
       }
     }
 
+    // A load of a boxed value from the result of a valueOf() call is the call's input parameter.
+    if (this->is_Load() && ld_adr->is_AddP() &&
+        (tp != NULL) && tp->is_ptr_to_boxed_value()) {
+      intptr_t ignore = 0;
+      Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
+      if (base != NULL && base->is_Proj() &&
+          base->as_Proj()->_con == TypeFunc::Parms &&
+          base->in(0)->is_CallStaticJava() &&
+          base->in(0)->as_CallStaticJava()->is_boxing_method()) {
+        return base->in(0)->in(TypeFunc::Parms);
+      }
+    }
+
     break;
   }
 
   return NULL;
 }
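The two can_see_stored_value() cases touched above correspond, at the Java level, to shapes like these (an illustrative sketch, not code from the changeset; names invented):

    public class StoredValueDemo {
        int field; // never written before the read in freshObjectZero()

        // New case: a load of the boxed value from the result of a boxing
        // method is just the call's input parameter, so this folds to 'x'.
        static int boxedRoundTrip(int x) {
            return Integer.valueOf(x).intValue();
        }

        // Existing case: a load from a freshly allocated object, beyond the
        // object header, sees zeroed memory and folds to the constant 0.
        static int freshObjectZero() {
            StoredValueDemo d = new StoredValueDemo();
            return d.field;
        }

        public static void main(String[] args) {
            System.out.println(boxedRoundTrip(7) + freshObjectZero()); // prints 7
        }
    }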
@@ -1065,13 +1090,15 @@
 
 //----------------------is_instance_field_load_with_local_phi------------------
 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
-  if( in(MemNode::Memory)->is_Phi() && in(MemNode::Memory)->in(0) == ctrl &&
-      in(MemNode::Address)->is_AddP() ) {
-    const TypeOopPtr* t_oop = in(MemNode::Address)->bottom_type()->isa_oopptr();
-    // Only instances.
-    if( t_oop != NULL && t_oop->is_known_instance_field() &&
+  if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
+      in(Address)->is_AddP() ) {
+    const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
+    // Only instances and boxed values.
+    if( t_oop != NULL &&
+        (t_oop->is_ptr_to_boxed_value() ||
+         t_oop->is_known_instance_field()) &&
         t_oop->offset() != Type::OffsetBot &&
         t_oop->offset() != Type::OffsetTop) {
       return true;
     }
   }
@@ -1081,11 +1108,11 @@
 //------------------------------Identity---------------------------------------
 // Loads are identity if previous store is to same address
 Node *LoadNode::Identity( PhaseTransform *phase ) {
   // If the previous store-maker is the right kind of Store, and the store is
   // to the same address, then we are equal to the value stored.
-  Node* mem = in(MemNode::Memory);
+  Node* mem = in(Memory);
   Node* value = can_see_stored_value(mem, phase);
   if( value ) {
     // byte, short & char stores truncate naturally.
     // A load has to load the truncated value which requires
     // some sort of masking operation and that requires an
@@ -1103,21 +1130,28 @@
 
   // Search for an existing data phi which was generated before for the same
   // instance's field to avoid infinite generation of phis in a loop.
   Node *region = mem->in(0);
   if (is_instance_field_load_with_local_phi(region)) {
-    const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr();
+    const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
     int this_index  = phase->C->get_alias_index(addr_t);
     int this_offset = addr_t->offset();
-    int this_id    = addr_t->is_oopptr()->instance_id();
+    int this_iid    = addr_t->instance_id();
+    if (!addr_t->is_known_instance() &&
+         addr_t->is_ptr_to_boxed_value()) {
+      // Use _idx of address base (could be Phi node) for boxed values.
+      intptr_t   ignore = 0;
+      Node*      base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
+      this_iid = base->_idx;
+    }
     const Type* this_type = bottom_type();
     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
       Node* phi = region->fast_out(i);
       if (phi->is_Phi() && phi != mem &&
-          phi->as_Phi()->is_same_inst_field(this_type, this_id, this_index, this_offset)) {
+          phi->as_Phi()->is_same_inst_field(this_type, this_iid, this_index, this_offset)) {
         return phi;
       }
     }
   }
 
   return this;
 }
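LoadNode::Identity() itself covers the classic store-to-load forwarding case; a minimal Java illustration (invented example, not from the changeset):

    public class StoreLoadIdentity {
        int x;

        // The read of p.x is from the same address as the store just above it,
        // so the compiled code can return 'v' directly instead of reloading.
        static int roundTrip(StoreLoadIdentity p, int v) {
            p.x = v;
            return p.x;
        }

        public static void main(String[] args) {
            System.out.println(roundTrip(new StoreLoadIdentity(), 42));
        }
    }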
@@ -1124,58 +1158,1 @@
-
-
-// Returns true if the AliasType refers to the field that holds the
-// cached box array.  Currently only handles the IntegerCache case.
-static bool is_autobox_cache(Compile::AliasType* atp) {
-  if (atp != NULL && atp->field() != NULL) {
-    ciField* field = atp->field();
-    ciSymbol* klass = field->holder()->name();
-    if (field->name() == ciSymbol::cache_field_name() &&
-        field->holder()->uses_default_loader() &&
-        klass == ciSymbol::java_lang_Integer_IntegerCache()) {
-      return true;
-    }
-  }
-  return false;
-}
-
-// Fetch the base value in the autobox array
-static bool fetch_autobox_base(Compile::AliasType* atp, int& cache_offset) {
-  if (atp != NULL && atp->field() != NULL) {
-    ciField* field = atp->field();
-    ciSymbol* klass = field->holder()->name();
-    if (field->name() == ciSymbol::cache_field_name() &&
-        field->holder()->uses_default_loader() &&
-        klass == ciSymbol::java_lang_Integer_IntegerCache()) {
-      assert(field->is_constant(), "what?");
-      ciObjArray* array = field->constant_value().as_object()->as_obj_array();
-      // Fetch the box object at the base of the array and get its value
-      ciInstance* box = array->obj_at(0)->as_instance();
-      ciInstanceKlass* ik = box->klass()->as_instance_klass();
-      if (ik->nof_nonstatic_fields() == 1) {
-        // This should be true nonstatic_field_at requires calling
-        // nof_nonstatic_fields so check it anyway
-        ciConstant c = box->field_value(ik->nonstatic_field_at(0));
-        cache_offset = c.as_int();
-      }
-      return true;
-    }
-  }
-  return false;
-}
-
-// Returns true if the AliasType refers to the value field of an
-// autobox object.  Currently only handles Integer.
-static bool is_autobox_object(Compile::AliasType* atp) {
-  if (atp != NULL && atp->field() != NULL) {
-    ciField* field = atp->field();
-    ciSymbol* klass = field->holder()->name();
-    if (field->name() == ciSymbol::value_name() &&
-        field->holder()->uses_default_loader() &&
-        klass == ciSymbol::java_lang_Integer()) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
 
@@ -1182,114 +1159,107 @@
 // We're loading from an object which has autobox behaviour.
 // If this object is the result of a valueOf call we'll have a phi
 // merging a newly allocated object and a load from the cache.
 // We want to replace this load with the original incoming
 // argument to the valueOf call.
 Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
-  Node* base = in(Address)->in(AddPNode::Base);
-  if (base->is_Phi() && base->req() == 3) {
-    AllocateNode* allocation = NULL;
-    int allocation_index = -1;
-    int load_index = -1;
-    for (uint i = 1; i < base->req(); i++) {
-      allocation = AllocateNode::Ideal_allocation(base->in(i), phase);
-      if (allocation != NULL) {
-        allocation_index = i;
-        load_index = 3 - allocation_index;
-        break;
-      }
-    }
-    bool has_load = ( allocation != NULL &&
-                      (base->in(load_index)->is_Load() ||
-                       base->in(load_index)->is_DecodeN() &&
-                       base->in(load_index)->in(1)->is_Load()) );
-    if (has_load && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) {
-      // Push the loads from the phi that comes from valueOf up
-      // through it to allow elimination of the loads and the recovery
-      // of the original value.
-      Node* mem_phi = in(Memory);
-      Node* offset = in(Address)->in(AddPNode::Offset);
-      Node* region = base->in(0);
-
-      Node* in1 = clone();
-      Node* in1_addr = in1->in(Address)->clone();
-      in1_addr->set_req(AddPNode::Base, base->in(allocation_index));
-      in1_addr->set_req(AddPNode::Address, base->in(allocation_index));
-      in1_addr->set_req(AddPNode::Offset, offset);
-      in1->set_req(0, region->in(allocation_index));
-      in1->set_req(Address, in1_addr);
-      in1->set_req(Memory, mem_phi->in(allocation_index));
-
-      Node* in2 = clone();
-      Node* in2_addr = in2->in(Address)->clone();
-      in2_addr->set_req(AddPNode::Base, base->in(load_index));
-      in2_addr->set_req(AddPNode::Address, base->in(load_index));
-      in2_addr->set_req(AddPNode::Offset, offset);
-      in2->set_req(0, region->in(load_index));
-      in2->set_req(Address, in2_addr);
-      in2->set_req(Memory, mem_phi->in(load_index));
-
-      in1_addr = phase->transform(in1_addr);
-      in1 =      phase->transform(in1);
-      in2_addr = phase->transform(in2_addr);
-      in2 =      phase->transform(in2);
-
-      PhiNode* result = PhiNode::make_blank(region, this);
-      result->set_req(allocation_index, in1);
-      result->set_req(load_index, in2);
-      return result;
-    }
+  assert(phase->C->eliminate_boxing(), "sanity");
+  intptr_t ignore = 0;
+  Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
+  if ((base == NULL) || base->is_Phi()) {
+    // Push the loads from the phi that comes from valueOf up
+    // through it to allow elimination of the loads and the recovery
+    // of the original value. It is done in split_through_phi().
+    return NULL;
   } else if (base->is_Load() ||
              base->is_DecodeN() && base->in(1)->is_Load()) {
-    if (base->is_DecodeN()) {
-      // Get LoadN node which loads cached Integer object
-      base = base->in(1);
-    }
-    // Eliminate the load of Integer.value for integers from the cache
+    // Eliminate the load of a boxed value for integer types from the cache
     // array by deriving the value from the index into the array.
     // Capture the offset of the load and then reverse the computation.
-    Node* load_base = base->in(Address)->in(AddPNode::Base);
-    if (load_base->is_DecodeN()) {
-      // Get LoadN node which loads IntegerCache.cache field
-      load_base = load_base->in(1);
-    }
+
+    // Get LoadN node which loads a boxing object from 'cache' array.
+    if (base->is_DecodeN()) {
+      base = base->in(1);
+    }
     }
-    if (load_base != NULL) {
-      Compile::AliasType* atp = phase->C->alias_type(load_base->adr_type());
-      intptr_t cache_offset;
-      int shift = -1;
-      Node* cache = NULL;
-      if (is_autobox_cache(atp)) {
-        shift  = exact_log2(type2aelembytes(T_OBJECT));
-        cache = AddPNode::Ideal_base_and_offset(load_base->in(Address), phase, cache_offset);
-      }
-      if (cache != NULL && base->in(Address)->is_AddP()) {
+    if (!base->in(Address)->is_AddP()) {
+      return NULL; // Complex address
+    }
+    AddPNode* address = base->in(Address)->as_AddP();
+    Node* cache_base = address->in(AddPNode::Base);
+    if ((cache_base != NULL) && cache_base->is_DecodeN()) {
+      // Get ConP node which is static 'cache' field.
+      cache_base = cache_base->in(1);
+    }
+    if ((cache_base != NULL) && cache_base->is_Con()) {
+      const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
+      if ((base_type != NULL) && base_type->is_autobox_cache()) {
         Node* elements[4];
-        int count = base->in(Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
-        int cache_low;
-        if (count > 0 && fetch_autobox_base(atp, cache_low)) {
-          int offset = arrayOopDesc::base_offset_in_bytes(memory_type()) - (cache_low << shift);
-          // Add up all the offsets making up the address of the load
-          Node* result = elements[0];
-          for (int i = 1; i < count; i++) {
-            result = phase->transform(new (phase->C) AddXNode(result, elements[i]));
+        int shift = exact_log2(type2aelembytes(T_OBJECT));
+        int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
+        if ((count >  0) && elements[0]->is_Con() &&
+            ((count == 1) ||
+             (count == 2) && elements[1]->Opcode() == Op_LShiftX &&
+                             elements[1]->in(2) == phase->intcon(shift))) {
+          ciObjArray* array = base_type->const_oop()->as_obj_array();
+          // Fetch the box object cache[0] at the base of the array and get its value
+          ciInstance* box = array->obj_at(0)->as_instance();
+          ciInstanceKlass* ik = box->klass()->as_instance_klass();
+          assert(ik->is_box_klass(), "sanity");
+          assert(ik->nof_nonstatic_fields() == 1, "change following code");
+          if (ik->nof_nonstatic_fields() == 1) {
+            // This should be true; nonstatic_field_at requires calling
+            // nof_nonstatic_fields, so check it anyway
+            ciConstant c = box->field_value(ik->nonstatic_field_at(0));
+            BasicType bt = c.basic_type();
+            // Only integer types have boxing cache.
+            assert(bt == T_BOOLEAN || bt == T_CHAR  ||
+                   bt == T_BYTE    || bt == T_SHORT ||
+                   bt == T_INT     || bt == T_LONG, err_msg_res("wrong type = %s", type2name(bt)));
+            jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
+            if (cache_low != (int)cache_low) {
+              return NULL; // should not happen since cache is array indexed by value
+            }
+            jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
+            if (offset != (int)offset) {
+              return NULL; // should not happen since cache is array indexed by value
+            }
+            // Add up all the offsets making up the address of the load
+            Node* result = elements[0];
+            for (int i = 1; i < count; i++) {
+              result = phase->transform(new (phase->C) AddXNode(result, elements[i]));
+            }
+            // Remove the constant offset from the address and then
+            result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-(int)offset)));
+            // remove the scaling of the offset to recover the original index.
+            if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
+              // Peel the shift off directly but wrap it in a dummy node
+              // since Ideal can't return existing nodes
+              result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0));
+            } else if (result->is_Add() && result->in(2)->is_Con() &&
+                       result->in(1)->Opcode() == Op_LShiftX &&
+                       result->in(1)->in(2) == phase->intcon(shift)) {
+              // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z)
+              // but for boxing cache access we know that X<<Z will not overflow
+              // (there is a range check) so we do this optimization by hand here.
+              Node* add_con = new (phase->C) RShiftXNode(result->in(2), phase->intcon(shift));
+              result = new (phase->C) AddXNode(result->in(1)->in(1), phase->transform(add_con));
+            } else {
+              result = new (phase->C) RShiftXNode(result, phase->intcon(shift));
+            }
+#ifdef _LP64
+            if (bt != T_LONG) {
+              result = new (phase->C) ConvL2INode(phase->transform(result));
+            }
+#else
+            if (bt == T_LONG) {
+              result = new (phase->C) ConvI2LNode(phase->transform(result));
+            }
+#endif
+            return result;
           }
-          // Remove the constant offset from the address and then
-          // remove the scaling of the offset to recover the original index.
-          result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-offset)));
-          if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
-            // Peel the shift off directly but wrap it in a dummy node
-            // since Ideal can't return existing nodes
-            result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0));
-          } else {
-            result = new (phase->C) RShiftXNode(result, phase->intcon(shift));
-          }
-#ifdef _LP64
-          result = new (phase->C) ConvL2INode(phase->transform(result));
-#endif
-          return result;
         }
       }
     }
   }
   return NULL;
 }
 
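The index-recovery arithmetic in the new eliminate_autobox() body works because cache[i] holds the box for the value i + cache_low, so after stripping the array base offset and the element-size scaling from the load address, the loaded value is simply index + cache_low. A small Java check of that relation (illustrative; CACHE_LOW = -128 assumes the default java.lang.Integer.IntegerCache range):

    public class CacheIndexMath {
        static final int CACHE_LOW = -128; // assumption: default cache lower bound

        // What the compiler recovers from the address computation:
        // value = ((addr - base_offset) >> shift) + cache_low = index + cache_low.
        static int valueFromIndex(int index) {
            return index + CACHE_LOW;
        }

        public static void main(String[] args) {
            for (int v = -128; v <= 127; v++) {
                if (Integer.valueOf(v) != Integer.valueOf(v)) // cache hit: same box object
                    throw new AssertionError("expected a cached box for " + v);
                int index = v - CACHE_LOW;                    // position in the cache array
                if (valueFromIndex(index) != v)
                    throw new AssertionError("index arithmetic broken for " + v);
            }
            System.out.println("cache index arithmetic checks out");
        }
    }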
@@ -1296,77 +1266,149 @@
-//------------------------------split_through_phi------------------------------
-// Split instance field load through Phi.
-Node *LoadNode::split_through_phi(PhaseGVN *phase) {
-  Node* mem     = in(MemNode::Memory);
-  Node* address = in(MemNode::Address);
-  const TypePtr *addr_t = phase->type(address)->isa_ptr();
-  const TypeOopPtr *t_oop = addr_t->isa_oopptr();
-
-  assert(mem->is_Phi() && (t_oop != NULL) &&
-         t_oop->is_known_instance_field(), "invalid conditions");
-
-  Node *region = mem->in(0);
+static bool stable_phi(PhiNode* phi, PhaseGVN *phase) {
+  Node* region = phi->in(0);
   if (region == NULL) {
-    return NULL; // Wait stable graph
+    return false; // Wait stable graph
   }
-  uint cnt = mem->req();
+  uint cnt = phi->req();
   for (uint i = 1; i < cnt; i++) {
     Node* rc = region->in(i);
     if (rc == NULL || phase->type(rc) == Type::TOP)
+      return false; // Wait stable graph
+    Node* in = phi->in(i);
+    if (in == NULL || phase->type(in) == Type::TOP)
+      return false; // Wait stable graph
+  }
+  return true;
+}
+//------------------------------split_through_phi------------------------------
+// Split instance or boxed field load through Phi.
+Node *LoadNode::split_through_phi(PhaseGVN *phase) {
+  Node* mem     = in(Memory);
+  Node* address = in(Address);
+  const TypeOopPtr *t_oop = phase->type(address)->isa_oopptr();
+
+  assert((t_oop != NULL) &&
+         (t_oop->is_known_instance_field() ||
+          t_oop->is_ptr_to_boxed_value()), "invalid conditions");
+
+  Compile* C = phase->C;
+  intptr_t ignore = 0;
+  Node*    base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
+  bool base_is_phi = (base != NULL) && base->is_Phi();
+  bool load_boxed_values = t_oop->is_ptr_to_boxed_value() && C->aggressive_unboxing() &&
+                           (base != NULL) && (base == address->in(AddPNode::Base)) &&
+                           phase->type(base)->higher_equal(TypePtr::NOTNULL);
+
+  if (!((mem->is_Phi() || base_is_phi) &&
+        (load_boxed_values || t_oop->is_known_instance_field()))) {
+    return NULL; // memory is not Phi
+  }
+
+  if (mem->is_Phi()) {
+    if (!stable_phi(mem->as_Phi(), phase)) {
       return NULL; // Wait stable graph
-    Node *in = mem->in(i);
-    if (in == NULL) {
+    }
+    uint cnt = mem->req();
+    // Check for loop invariant memory.
+    if (cnt == 3) {
+      for (uint i = 1; i < cnt; i++) {
+        Node* in = mem->in(i);
+        Node*  m = optimize_memory_chain(in, t_oop, this, phase);
+        if (m == mem) {
+          set_req(Memory, mem->in(cnt - i));
+          return this; // made change
+        }
+      }
+    }
+  }
+  if (base_is_phi) {
+    if (!stable_phi(base->as_Phi(), phase)) {
       return NULL; // Wait stable graph
     }
-  }
-  // Check for loop invariant.
-  if (cnt == 3) {
-    for (uint i = 1; i < cnt; i++) {
-      Node *in = mem->in(i);
-      Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
-      if (m == mem) {
-        set_req(MemNode::Memory, mem->in(cnt - i)); // Skip this phi.
-        return this;
-      }
-    }
-  }
+    uint cnt = base->req();
+    // Check for loop invariant memory.
+    if (cnt == 3) {
+      for (uint i = 1; i < cnt; i++) {
+        if (base->in(i) == base) {
+          return NULL; // Wait stable graph
+        }
+      }
+    }
+  }
+  bool load_boxed_phi = load_boxed_values && base_is_phi && (base->in(0) == mem->in(0));
+
   // Split through Phi (see original code in loopopts.cpp).
-  assert(phase->C->have_alias_type(addr_t), "instance should have alias type");
+  assert(C->have_alias_type(t_oop), "instance should have alias type");
 
   // Do nothing here if Identity will find a value
   // (to avoid infinite chain of value phis generation).
   if (!phase->eqv(this, this->Identity(phase)))
     return NULL;
 
-  // Skip the split if the region dominates some control edge of the address.
-  if (!MemNode::all_controls_dominate(address, region))
-    return NULL;
+  // Select Region to split through.
+  Node* region;
+  if (!base_is_phi) {
+    assert(mem->is_Phi(), "sanity");
+    region = mem->in(0);
+    // Skip if the region dominates some control edge of the address.
+    if (!MemNode::all_controls_dominate(address, region))
+      return NULL;
+  } else if (!mem->is_Phi()) {
+    assert(base_is_phi, "sanity");
+    region = base->in(0);
+    // Skip if the region dominates some control edge of the memory.
+    if (!MemNode::all_controls_dominate(mem, region))
+      return NULL;
+  } else if (base->in(0) != mem->in(0)) {
+    assert(base_is_phi && mem->is_Phi(), "sanity");
+    if (MemNode::all_controls_dominate(mem, base->in(0))) {
+      region = base->in(0);
+    } else if (MemNode::all_controls_dominate(address, mem->in(0))) {
+      region = mem->in(0);
+    } else {
+      return NULL; // complex graph
+    }
+  } else {
+    assert(base->in(0) == mem->in(0), "sanity");
+    region = mem->in(0);
+  }
 
   const Type* this_type = this->bottom_type();
-  int this_index  = phase->C->get_alias_index(addr_t);
-  int this_offset = addr_t->offset();
-  int this_iid    = addr_t->is_oopptr()->instance_id();
-  PhaseIterGVN *igvn = phase->is_IterGVN();
-  Node *phi = new (igvn->C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
+  int this_index  = C->get_alias_index(t_oop);
+  int this_offset = t_oop->offset();
+  int this_iid    = t_oop->instance_id();
+  if (!t_oop->is_known_instance() && load_boxed_values) {
+    // Use _idx of address base for boxed values.
+    this_iid = base->_idx;
+  }
+  PhaseIterGVN* igvn = phase->is_IterGVN();
+  Node* phi = new (C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
   for (uint i = 1; i < region->req(); i++) {
-    Node *x;
+    Node* x;
     Node* the_clone = NULL;
-    if (region->in(i) == phase->C->top()) {
-      x = phase->C->top();      // Dead path?  Use a dead data op
+    if (region->in(i) == C->top()) {
+      x = C->top();      // Dead path?  Use a dead data op
     } else {
       x = this->clone();        // Else clone up the data op
       the_clone = x;            // Remember for possible deletion.
       // Alter data node to use pre-phi inputs
       if (this->in(0) == region) {
         x->set_req(0, region->in(i));
       } else {
         x->set_req(0, NULL);
       }
-      for (uint j = 1; j < this->req(); j++) {
-        Node *in = this->in(j);
-        if (in->is_Phi() && in->in(0) == region)
-          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
+      if (mem->is_Phi() && (mem->in(0) == region)) {
+        x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone.
+      }
+      if (address->is_Phi() && address->in(0) == region) {
+        x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone
+      }
+      if (base_is_phi && (base->in(0) == region)) {
+        Node* base_x = base->in(i); // Clone address for loads from boxed objects.
+        Node* adr_x = phase->transform(new (C) AddPNode(base_x,base_x,address->in(AddPNode::Offset)));
+        x->set_req(Address, adr_x);
       }
     }
     // Check for a 'win' on some paths
     const Type *t = x->Value(igvn);
 
@@ -1392,23 +1434,24 @@
       x->raise_bottom_type(t);
       Node *y = x->Identity(igvn);
       if (y != x) {
         x = y;
       } else {
-        y = igvn->hash_find(x);
+        y = igvn->hash_find_insert(x);
         if (y) {
           x = y;
         } else {
           // Else x is a new node we are keeping
           // We do not need register_new_node_with_optimizer
           // because set_type has already been called.
           igvn->_worklist.push(x);
         }
       }
     }
-    if (x != the_clone && the_clone != NULL)
+    if (x != the_clone && the_clone != NULL) {
       igvn->remove_dead_node(the_clone);
+    }
     phi->set_req(i, x);
   }
   // Record Phi
   igvn->register_new_node_with_optimizer(phi);
   return phi;
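At the Java level, the load that split_through_phi() now handles sits below a control-flow merge of two allocations (or of an allocation and a cache load, in the boxed case). Roughly (an invented illustration, not changeset code):

    public class SplitThroughPhi {
        final int x;
        SplitThroughPhi(int x) { this.x = x; }

        // The field load below the merge can be split into one load per
        // predecessor and then folded to a Phi of the stored constants.
        static int pick(boolean flag) {
            SplitThroughPhi p = flag ? new SplitThroughPhi(1)
                                     : new SplitThroughPhi(2);
            return p.x;
        }

        public static void main(String[] args) {
            System.out.println(pick(true) + pick(false)); // prints 3
        }
    }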
@@ -1443,44 +1486,41 @@
         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
         && all_controls_dominate(base, phase->C->start())) {
       // A method-invariant, non-null address (constant or 'this' argument).
       set_req(MemNode::Control, NULL);
     }
-
-    if (EliminateAutoBox && can_reshape) {
-      assert(!phase->type(base)->higher_equal(TypePtr::NULL_PTR), "the autobox pointer should be non-null");
-      Compile::AliasType* atp = phase->C->alias_type(adr_type());
-      if (is_autobox_object(atp)) {
-        Node* result = eliminate_autobox(phase);
-        if (result != NULL) return result;
-      }
-    }
   }
 
   Node* mem = in(MemNode::Memory);
   const TypePtr *addr_t = phase->type(address)->isa_ptr();
 
-  if (addr_t != NULL) {
+  if (can_reshape && (addr_t != NULL)) {
     // try to optimize our memory input
-    Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, phase);
+    Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
     if (opt_mem != mem) {
       set_req(MemNode::Memory, opt_mem);
       if (phase->type( opt_mem ) == Type::TOP) return NULL;
       return this;
     }
     const TypeOopPtr *t_oop = addr_t->isa_oopptr();
-    if (can_reshape && opt_mem->is_Phi() &&
-        (t_oop != NULL) && t_oop->is_known_instance_field()) {
+    if ((t_oop != NULL) &&
+        (t_oop->is_known_instance_field() ||
+         t_oop->is_ptr_to_boxed_value())) {
       PhaseIterGVN *igvn = phase->is_IterGVN();
       if (igvn != NULL && igvn->_worklist.member(opt_mem)) {
         // Delay this transformation until memory Phi is processed.
         phase->is_IterGVN()->_worklist.push(this);
         return NULL;
       }
       // Split instance field load through Phi.
       Node* result = split_through_phi(phase);
       if (result != NULL) return result;
+
+      if (t_oop->is_ptr_to_boxed_value()) {
+        Node* result = eliminate_autobox(phase);
+        if (result != NULL) return result;
+      }
     }
   }
 
   // Check for prior store with a different base or offset; make Load
   // independent.  Skip through any number of them.  Bail out if the stores
@@ -1585,22 +1625,27 @@
         // In any case, do not allow the join, per se, to empty out the type.
         if (jt->empty() && !t->empty()) {
           // This can happen if an interface-typed array narrows to a class type.
           jt = _type;
         }
-
-        if (EliminateAutoBox && adr->is_AddP()) {
+#ifdef ASSERT
+        if (phase->C->eliminate_boxing() && adr->is_AddP()) {
           // The pointers in the autobox arrays are always non-null
           Node* base = adr->in(AddPNode::Base);
-          if (base != NULL &&
-              !phase->type(base)->higher_equal(TypePtr::NULL_PTR)) {
-            Compile::AliasType* atp = C->alias_type(base->adr_type());
-            if (is_autobox_cache(atp)) {
-              return jt->join(TypePtr::NOTNULL)->is_ptr();
+          if ((base != NULL) && base->is_DecodeN()) {
+            // Get LoadN node which loads IntegerCache.cache field
+            base = base->in(1);
+          }
+          if ((base != NULL) && base->is_Con()) {
+            const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
+            if ((base_type != NULL) && base_type->is_autobox_cache()) {
+              // It could be a narrow oop
+              assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
             }
           }
         }
+#endif
         return jt;
       }
     }
   } else if (tp->base() == Type::InstPtr) {
     ciEnv* env = C->env();
@@ -1636,10 +1681,14 @@
       }
     }
     // Optimizations for constant objects
     ciObject* const_oop = tinst->const_oop();
     if (const_oop != NULL) {
+      // For a constant boxed value treat the target field as a compile time constant.
+      if (tinst->is_ptr_to_boxed_value()) {
+        return tinst->get_const_boxed_value();
+      } else
       // For constant CallSites treat the target field as a compile time constant.
       if (const_oop->is_call_site()) {
         ciCallSite* call_site = const_oop->as_call_site();
         ciField* field = call_site->klass()->as_instance_klass()->get_field_by_offset(off, /*is_static=*/ false);
         if (field != NULL && field->is_call_site_target()) {
  1757   // If we are loading from a freshly-allocated object, produce a zero,
  1806   // If we are loading from a freshly-allocated object, produce a zero,
  1758   // if the load is provably beyond the header of the object.
  1807   // if the load is provably beyond the header of the object.
  1759   // (Also allow a variable load from a fresh array to produce zero.)
  1808   // (Also allow a variable load from a fresh array to produce zero.)
  1760   const TypeOopPtr *tinst = tp->isa_oopptr();
  1809   const TypeOopPtr *tinst = tp->isa_oopptr();
  1761   bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
  1810   bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
  1762   if (ReduceFieldZeroing || is_instance) {
  1811   bool is_boxed_value = (tinst != NULL) && tinst->is_ptr_to_boxed_value();
       
  1812   if (ReduceFieldZeroing || is_instance || is_boxed_value) {
  1763     Node* value = can_see_stored_value(mem,phase);
  1813     Node* value = can_see_stored_value(mem,phase);
  1764     if (value != NULL && value->is_Con()) {
  1814     if (value != NULL && value->is_Con()) {
  1765       assert(value->bottom_type()->higher_equal(_type),"sanity");
  1815       assert(value->bottom_type()->higher_equal(_type),"sanity");
  1766       return value->bottom_type();
  1816       return value->bottom_type();
  1767     }
  1817     }
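This hunk adds is_boxed_value to the conditions under which the load tries to see the value stored by a dominating store (or the implicit zero of fresh memory). The underlying idea, sketched over a toy linear store chain instead of C2's memory graph (the structures here are invented for illustration):

    #include <cstdio>
    #include <vector>

    struct Store { int offset; int value; };

    // Walk the chain newest-to-oldest; the first store to the same offset is
    // the value the load "sees". Running past every store means the location
    // is still in its freshly-allocated state, which reads as zero.
    static int load_through_chain(const std::vector<Store>& chain, int offset) {
      for (int i = (int)chain.size() - 1; i >= 0; i--) {
        if (chain[i].offset == offset)
          return chain[i].value;
      }
      return 0;  // beyond every captured store: fresh memory is zero
    }

    int main() {
      std::vector<Store> mem = { { 8, 7 } };             // one captured store
      std::printf("%d\n", load_through_chain(mem, 8));   // 7: sees the store
      std::printf("%d\n", load_through_chain(mem, 16));  // 0: untouched field
      return 0;
    }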
  2881   if (remove_dead_region(phase, can_reshape)) return this;
  2931   if (remove_dead_region(phase, can_reshape)) return this;
  2882   // Don't bother trying to transform a dead node
  2932   // Don't bother trying to transform a dead node
  2883   if (in(0) && in(0)->is_top())  return NULL;
  2933   if (in(0) && in(0)->is_top())  return NULL;
  2884 
  2934 
  2885   // Eliminate volatile MemBars for scalar replaced objects.
  2935   // Eliminate volatile MemBars for scalar replaced objects.
  2886   if (can_reshape && req() == (Precedent+1) &&
  2936   if (can_reshape && req() == (Precedent+1)) {
  2887       (Opcode() == Op_MemBarAcquire || Opcode() == Op_MemBarVolatile)) {
  2937     bool eliminate = false;
  2888     // Volatile field loads and stores.
  2938     int opc = Opcode();
  2889     Node* my_mem = in(MemBarNode::Precedent);
  2939     if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
  2890     if (my_mem != NULL && my_mem->is_Mem()) {
  2940       // Volatile field loads and stores.
  2891       const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
  2941       Node* my_mem = in(MemBarNode::Precedent);
  2892       // Check for scalar replaced object reference.
  2942       if (my_mem != NULL && my_mem->is_Mem()) {
  2893       if( t_oop != NULL && t_oop->is_known_instance_field() &&
  2943         const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
  2894           t_oop->offset() != Type::OffsetBot &&
  2944         // Check for scalar replaced object reference.
  2895           t_oop->offset() != Type::OffsetTop) {
  2945         if( t_oop != NULL && t_oop->is_known_instance_field() &&
  2896         // Replace the MemBar's projections with its inputs.
  2946             t_oop->offset() != Type::OffsetBot &&
  2897         PhaseIterGVN* igvn = phase->is_IterGVN();
  2947             t_oop->offset() != Type::OffsetTop) {
  2898         igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
  2948           eliminate = true;
  2899         igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
  2949         }
  2900         // Must return either the original node (now dead) or a new node
  2950       }
  2901         // (Do not return a top here, since that would break the uniqueness of top.)
  2951     } else if (opc == Op_MemBarRelease) {
  2902         return new (phase->C) ConINode(TypeInt::ZERO);
  2952       // Final field stores.
  2903       }
  2953       Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
       
  2954       if ((alloc != NULL) && alloc->is_Allocate() &&
       
  2955           alloc->as_Allocate()->_is_non_escaping) {
       
  2956         // The allocated object does not escape.
       
  2957         eliminate = true;
       
  2958       }
       
  2959     }
       
  2960     if (eliminate) {
       
  2961       // Replace the MemBar's projections with its inputs.
       
  2962       PhaseIterGVN* igvn = phase->is_IterGVN();
       
  2963       igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
       
  2964       igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
       
  2965       // Must return either the original node (now dead) or a new node
       
  2966       // (Do not return a top here, since that would break the uniqueness of top.)
       
  2967       return new (phase->C) ConINode(TypeInt::ZERO);
  2904     }
  2968     }
  2905   }
  2969   }
  2906   return NULL;
  2970   return NULL;
  2907 }
  2971 }
  2908 
  2972 
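The rewritten Ideal above splits the transform into a decision phase (setting eliminate) and a single rewiring phase: acquire/volatile barriers go away when they only fence a known, scalar-replaced instance field, and the newly handled MemBarRelease goes away when the allocation it guards is proven non-escaping. A condensed sketch of just the decision predicate, over a hypothetical BarrierInfo rather than real C2 nodes:

    #include <cstdio>

    enum BarrierKind { Acquire, Release, Volatile };

    struct BarrierInfo {
      BarrierKind kind;
      bool fences_known_instance;  // precedent is a volatile access to a
                                   // known (scalar-replaceable) instance
      bool alloc_is_non_escaping;  // Release: the allocation never escapes
    };

    // A barrier is removable when the object whose accesses it orders is
    // provably local to one thread, so no other thread can observe the
    // difference.
    static bool can_eliminate(const BarrierInfo& b) {
      if (b.kind == Acquire || b.kind == Volatile)
        return b.fences_known_instance;
      if (b.kind == Release)
        return b.alloc_is_non_escaping;
      return false;
    }

    int main() {
      BarrierInfo rel = { Release, false, true };
      std::printf("eliminate: %s\n", can_eliminate(rel) ? "yes" : "no");
      return 0;
    }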
  3111 // "simple enough" to be folded into an object initialization.
  3175 // "simple enough" to be folded into an object initialization.
  3112 // Attempts to prove that a store's initial value 'n' can be captured
  3176 // Attempts to prove that a store's initial value 'n' can be captured
  3113 // within the initialization without creating a vicious cycle, such as:
  3177 // within the initialization without creating a vicious cycle, such as:
  3114 //     { Foo p = new Foo(); p.next = p; }
  3178 //     { Foo p = new Foo(); p.next = p; }
  3115 // True for constants and parameters and small combinations thereof.
  3179 // True for constants and parameters and small combinations thereof.
  3116 bool InitializeNode::detect_init_independence(Node* n,
  3180 bool InitializeNode::detect_init_independence(Node* n, int& count) {
  3117                                               bool st_is_pinned,
       
  3118                                               int& count) {
       
  3119   if (n == NULL)      return true;   // (can this really happen?)
  3181   if (n == NULL)      return true;   // (can this really happen?)
  3120   if (n->is_Proj())   n = n->in(0);
  3182   if (n->is_Proj())   n = n->in(0);
  3121   if (n == this)      return false;  // found a cycle
  3183   if (n == this)      return false;  // found a cycle
  3122   if (n->is_Con())    return true;
  3184   if (n->is_Con())    return true;
  3123   if (n->is_Start())  return true;   // params, etc., are OK
  3185   if (n->is_Start())  return true;   // params, etc., are OK
  3133     // must have preceded the init, or else be equal to the init.
  3195     // must have preceded the init, or else be equal to the init.
  3134     // Even after loop optimizations (which might change control edges)
  3196     // Even after loop optimizations (which might change control edges)
  3135     // a store is never pinned *before* the availability of its inputs.
  3197     // a store is never pinned *before* the availability of its inputs.
  3136     if (!MemNode::all_controls_dominate(n, this))
  3198     if (!MemNode::all_controls_dominate(n, this))
  3137       return false;                  // failed to prove a good control
  3199       return false;                  // failed to prove a good control
  3138 
       
  3139   }
  3200   }
  3140 
  3201 
  3141   // Check data edges for possible dependencies on 'this'.
  3202   // Check data edges for possible dependencies on 'this'.
  3142   if ((count += 1) > 20)  return false;  // complexity limit
  3203   if ((count += 1) > 20)  return false;  // complexity limit
  3143   for (uint i = 1; i < n->req(); i++) {
  3204   for (uint i = 1; i < n->req(); i++) {
  3144     Node* m = n->in(i);
  3205     Node* m = n->in(i);
  3145     if (m == NULL || m == n || m->is_top())  continue;
  3206     if (m == NULL || m == n || m->is_top())  continue;
  3146     uint first_i = n->find_edge(m);
  3207     uint first_i = n->find_edge(m);
  3147     if (i != first_i)  continue;  // process duplicate edge just once
  3208     if (i != first_i)  continue;  // process duplicate edge just once
  3148     if (!detect_init_independence(m, st_is_pinned, count)) {
  3209     if (!detect_init_independence(m, count)) {
  3149       return false;
  3210       return false;
  3150     }
  3211     }
  3151   }
  3212   }
  3152 
  3213 
  3153   return true;
  3214   return true;
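With the st_is_pinned flag gone, detect_init_independence is a plain bounded walk over the stored value's inputs: fail on a path back to the InitializeNode itself (a cycle like p.next = p), fail past a fixed complexity budget, succeed on constants and parameters. A standalone sketch with a toy Node type (not HotSpot's), keeping the same 20-node bound; the duplicate-edge check of the real code is omitted for brevity:

    #include <cstdio>
    #include <vector>

    struct Node { bool is_con; std::vector<Node*> in; };

    // Returns true when 'n' is provably computable before the initialization
    // 'init'; the budget mirrors the complexity limit in the real code.
    static bool init_independent(Node* n, Node* init, int& count) {
      if (n == NULL)    return true;
      if (n == init)    return false;  // found a cycle back to the init
      if (n->is_con)    return true;   // constants are always independent
      if (++count > 20) return false;  // complexity limit
      for (Node* m : n->in)
        if (!init_independent(m, init, count))
          return false;
      return true;
    }

    int main() {
      Node init = { false, {} };
      Node c    = { true,  {} };
      Node add  = { false, { &c, &c } };  // small combination of constants
      int budget = 0;
      std::printf("%s\n",
                  init_independent(&add, &init, budget) ? "independent"
                                                        : "dependent");
      return 0;
    }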
  3174     return FAIL;                // inscrutable address
  3235     return FAIL;                // inscrutable address
  3175   if (alloc != allocation())
  3236   if (alloc != allocation())
  3176     return FAIL;                // wrong allocation!  (store needs to float up)
  3237     return FAIL;                // wrong allocation!  (store needs to float up)
  3177   Node* val = st->in(MemNode::ValueIn);
  3238   Node* val = st->in(MemNode::ValueIn);
  3178   int complexity_count = 0;
  3239   int complexity_count = 0;
  3179   if (!detect_init_independence(val, true, complexity_count))
  3240   if (!detect_init_independence(val, complexity_count))
  3180     return FAIL;                // stored value must be 'simple enough'
  3241     return FAIL;                // stored value must be 'simple enough'
  3181 
  3242 
  3182   // The Store can be captured only if nothing after the allocation
  3243   // The Store can be captured only if nothing after the allocation
  3183   // and before the Store is using the memory location that the store
  3244   // and before the Store is using the memory location that the store
  3184   // overwrites.
  3245   // overwrites.
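The comment above states the remaining capture condition: between the allocation and the candidate store, no other memory operation may use the location the store overwrites, or folding the store into the initialization would change what that operation observes. A tiny sketch of that check over a hypothetical flat list of intervening operations (the real code walks C2's memory edges):

    #include <cstdio>
    #include <vector>

    struct MemOp { int offset; };  // an intervening access to the new object

    // Capturing the store is only safe when nothing between the allocation
    // and the store touches the same slot.
    static bool safe_to_capture(const std::vector<MemOp>& between, int st_off) {
      for (const MemOp& op : between)
        if (op.offset == st_off)
          return false;  // someone already uses the slot being overwritten
      return true;
    }

    int main() {
      std::vector<MemOp> ops = { { 16 } };  // an access to offset 16 intervenes
      std::printf("offset 16: %s\n", safe_to_capture(ops, 16) ? "ok" : "no");
      std::printf("offset  8: %s\n", safe_to_capture(ops, 8)  ? "ok" : "no");
      return 0;
    }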