@@ -131,11 +131,13 @@
   result = optimize_simple_memory_chain(result, t_adr, phase);
   if (is_instance && igvn != NULL && result->is_Phi()) {
     PhiNode *mphi = result->as_Phi();
     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
     const TypePtr *t = mphi->adr_type();
-    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM) {
+    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
+        t->isa_oopptr() && !t->is_oopptr()->is_instance() &&
+        t->is_oopptr()->cast_to_instance(t_oop->instance_id()) == t_oop) {
       // clone the Phi with our address type
       result = mphi->split_out_instance(t_adr, igvn);
     } else {
       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
     }
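For readers parsing the widened guard: it relies on && binding tighter than ||, so the three new lines form a single conjunct. With explicit parentheses (a restatement for readability only, not a further change) the condition reads:

    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
        (t->isa_oopptr() && !t->is_oopptr()->is_instance() &&
         t->is_oopptr()->cast_to_instance(t_oop->instance_id()) == t_oop)) {
      // clone the Phi with our address type
      result = mphi->split_out_instance(t_adr, igvn);
    }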
@@ -261,11 +263,14 @@
 
   // 'dom' dominates 'sub' if its control edge and the control edges
   // of all its inputs dominate or are equal to sub's control edge.
 
   // Currently 'sub' is either Allocate, Initialize or Start nodes.
-  assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start(), "expecting only these nodes");
+  // Or Region for the check in LoadNode::Ideal().
+  // 'sub' should have sub->in(0) != NULL.
+  assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
+         sub->is_Region(), "expecting only these nodes");
 
   // Get control edge of 'sub'.
   sub = sub->find_exact_control(sub->in(0));
   if (sub == NULL || sub->is_top())
     return false; // Conservative answer for dead code
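The dominance rule stated in the comment above is: dom's own control edge, and the control edge of each of dom's inputs, must dominate (or equal) sub's control edge. A minimal self-contained sketch of that one-level check, using a toy node type and an immediate-dominator chain walk instead of HotSpot's Node and dominator machinery (all names here are illustrative assumptions):

    #include <vector>

    struct ToyNode {
      ToyNode* idom = nullptr;        // immediate dominator in the control graph
      ToyNode* ctrl = nullptr;        // control edge; nullptr if unpinned
      std::vector<ToyNode*> inputs;   // data inputs
    };

    // 'a' dominates (or equals) 'b' if 'a' lies on b's immediate-dominator chain.
    static bool ctrl_dominates(ToyNode* a, ToyNode* b) {
      for (ToyNode* n = b; n != nullptr; n = n->idom)
        if (n == a) return true;
      return false;
    }

    static bool all_controls_dominate(ToyNode* dom, ToyNode* sub_ctrl) {
      if (dom->ctrl != nullptr && !ctrl_dominates(dom->ctrl, sub_ctrl))
        return false;
      for (ToyNode* in : dom->inputs)
        if (in != nullptr && in->ctrl != nullptr && !ctrl_dominates(in->ctrl, sub_ctrl))
          return false;
      return true;
    }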
@@ -574,28 +579,30 @@
 
 //------------------------------Ideal_DU_postCCP-------------------------------
 // Find any cast-away of null-ness and keep its control. Null cast-aways are
 // going away in this pass and we need to make this memory op depend on the
 // gating null check.
+Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
+  return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address));
+}
 
 // I tried to leave the CastPP's in. This makes the graph more accurate in
 // some sense; we get to keep around the knowledge that an oop is not-null
 // after some test. Alas, the CastPP's interfere with GVN (some values are
 // the regular oop, some are the CastPP of the oop, all merge at Phi's which
 // cannot collapse, etc). This cost us 10% on SpecJVM, even when I removed
 // some of the more trivial cases in the optimizer. Removing more useless
 // Phi's started allowing Loads to illegally float above null checks. I gave
 // up on this approach. CNC 10/20/2000
-Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
-  Node *ctr = in(MemNode::Control);
-  Node *mem = in(MemNode::Memory);
-  Node *adr = in(MemNode::Address);
+// This static method may be called by nodes other than MemNode (EncodePNode
+// calls it). Only the control edge of the node 'n' might be updated.
+Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
   Node *skipped_cast = NULL;
   // Need a null check? Regular static accesses do not because they are
   // from constant addresses. Array ops are gated by the range check (which
   // always includes a NULL check). Just check field ops.
-  if( !ctr ) {
+  if( n->in(MemNode::Control) == NULL ) {
     // Scan upwards for the highest location we can place this memory op.
     while( true ) {
       switch( adr->Opcode() ) {
 
       case Op_AddP:             // No change to NULL-ness, so peek thru AddP's
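The long comment block above is the motivation for this whole function: CCP is about to remove CastPP nodes, so the memory op must stay pinned below the null check that the CastPP used to represent. A source-level picture of the hazard (hypothetical types, not VM code):

    struct Widget { int field; };

    int read_field(Widget* p) {
      if (p != nullptr) {
        return p->field;  // must not be scheduled above the test; once the
      }                   // CastPP is gone, only the control edge set by
      return 0;           // Ideal_common_DU_postCCP keeps the load below it
    }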
@@ -650,23 +657,24 @@
         // the node in place. 4959717.
         skipped_cast = adr;
         adr = adr->in(1);
         continue;
       }
-      ccp->hash_delete(this);
-      set_req(MemNode::Control, adr->in(0));
-      ccp->hash_insert(this);
-      return this;
+      ccp->hash_delete(n);
+      n->set_req(MemNode::Control, adr->in(0));
+      ccp->hash_insert(n);
+      return n;
 
       // List of "safe" opcodes; those that implicitly block the memory
       // op below any null check.
       case Op_CastX2P:          // no null checks on native pointers
       case Op_Parm:             // 'this' pointer is not null
       case Op_LoadP:            // Loading from within a klass
       case Op_LoadN:            // Loading from within a klass
       case Op_LoadKlass:        // Loading from within a klass
       case Op_ConP:             // Loading from a klass
+      case Op_ConN:             // Loading from a klass
       case Op_CreateEx:         // Sucking up the guts of an exception oop
       case Op_Con:              // Reading from TLS
       case Op_CMoveP:           // CMoveP is pinned
         break;                  // No progress
 
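Two entries in the "safe" list above have a direct source-level reading: a load through 'this' (Op_Parm) or through a raw native pointer (Op_CastX2P) carries no oop null check, so the upward scan can stop without pinning anything. A toy illustration (hypothetical code, not from the VM):

    #include <cstdint>

    struct Obj {
      int field;
      int get() { return field; }  // 'this' is never null inside a member call
    };

    int read_native(intptr_t raw) {
      return *reinterpret_cast<int*>(raw);  // native pointer: no oop null check
    }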
@@ -674,12 +682,12 @@
       case Op_SCMemProj:        // Memory state from store conditional ops
 #ifdef ASSERT
         {
           assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value");
           const Node* call = adr->in(0);
-          if (call->is_CallStaticJava()) {
-            const CallStaticJavaNode* call_java = call->as_CallStaticJava();
+          if (call->is_CallJava()) {
+            const CallJavaNode* call_java = call->as_CallJava();
             const TypeTuple *r = call_java->tf()->range();
             assert(r->cnt() > TypeFunc::Parms, "must return value");
             const Type* ret_type = r->field_at(TypeFunc::Parms);
             assert(ret_type && ret_type->isa_ptr(), "must return pointer");
             // We further presume that this is one of
@@ -747,26 +755,26 @@
   case T_FLOAT:   return new (C, 3) LoadFNode(ctl, mem, adr, adr_type, rt );
   case T_DOUBLE:  return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt );
   case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr() );
   case T_OBJECT:
 #ifdef _LP64
-    if (adr->bottom_type()->is_narrow()) {
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       const TypeNarrowOop* narrowtype;
       if (rt->isa_narrowoop()) {
         narrowtype = rt->is_narrowoop();
       } else {
         narrowtype = rt->is_oopptr()->make_narrowoop();
       }
       Node* load = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, narrowtype));
 
       return DecodeNNode::decode(&gvn, load);
     } else
 #endif
     {
-      assert(!adr->bottom_type()->is_narrow(), "should have got back a narrow oop");
+      assert(!adr->bottom_type()->is_ptr_to_narrowoop(), "should have got back a narrow oop");
       return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
     }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
 }
 
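The _LP64 branch above is the compressed-oops path: when the address points at a narrow oop field, the load is issued as a LoadN and then widened with DecodeNNode::decode. A simplified model of the underlying arithmetic; the base and shift here are assumed illustrative values, not the VM's actual configuration:

    #include <cstdint>

    static const uint64_t kHeapBase = 0x100000000ULL;  // assumed heap base
    static const unsigned kShift    = 3;               // assumed 8-byte alignment

    // EncodeP direction: full-width oop to 32-bit narrow oop (null stays null).
    uint32_t encode_heap_oop(uint64_t oop) {
      return oop == 0 ? 0 : (uint32_t)((oop - kHeapBase) >> kShift);
    }

    // DecodeN direction: narrow oop back to a full-width oop.
    uint64_t decode_heap_oop(uint32_t narrow) {
      return narrow == 0 ? 0 : kHeapBase + ((uint64_t)narrow << kShift);
    }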
@@ -1116,10 +1124,131 @@
     }
   }
   return NULL;
 }
 
+//------------------------------split_through_phi------------------------------
+// Split instance field load through Phi.
+Node *LoadNode::split_through_phi(PhaseGVN *phase) {
+  Node* mem = in(MemNode::Memory);
+  Node* address = in(MemNode::Address);
+  const TypePtr *addr_t = phase->type(address)->isa_ptr();
+  const TypeOopPtr *t_oop = addr_t->isa_oopptr();
+
+  assert(mem->is_Phi() && (t_oop != NULL) &&
+         t_oop->is_instance_field(), "invalid conditions");
+
+  Node *region = mem->in(0);
+  if (region == NULL) {
+    return NULL; // Wait stable graph
+  }
+  uint cnt = mem->req();
+  for( uint i = 1; i < cnt; i++ ) {
+    Node *in = mem->in(i);
+    if( in == NULL ) {
+      return NULL; // Wait stable graph
+    }
+  }
+  // Check for loop invariant.
+  if (cnt == 3) {
+    for( uint i = 1; i < cnt; i++ ) {
+      Node *in = mem->in(i);
+      Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
+      if (m == mem) {
+        set_req(MemNode::Memory, mem->in(cnt - i)); // Skip this phi.
+        return this;
+      }
+    }
+  }
+  // Split through Phi (see original code in loopopts.cpp).
+  assert(phase->C->have_alias_type(addr_t), "instance should have alias type");
+
+  // Do nothing here if Identity will find a value
+  // (to avoid infinite chain of value phis generation).
+  if ( !phase->eqv(this, this->Identity(phase)) )
+    return NULL;
+
+  // Skip the split if the region dominates some control edge of the address.
+  if (cnt == 3 && !MemNode::all_controls_dominate(address, region))
+    return NULL;
+
+  const Type* this_type = this->bottom_type();
+  int this_index = phase->C->get_alias_index(addr_t);
+  int this_offset = addr_t->offset();
+  int this_iid = addr_t->is_oopptr()->instance_id();
+  int wins = 0;
+  PhaseIterGVN *igvn = phase->is_IterGVN();
+  Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
+  for( uint i = 1; i < region->req(); i++ ) {
+    Node *x;
+    Node* the_clone = NULL;
+    if( region->in(i) == phase->C->top() ) {
+      x = phase->C->top();      // Dead path?  Use a dead data op
+    } else {
+      x = this->clone();        // Else clone up the data op
+      the_clone = x;            // Remember for possible deletion.
+      // Alter data node to use pre-phi inputs
+      if( this->in(0) == region ) {
+        x->set_req( 0, region->in(i) );
+      } else {
+        x->set_req( 0, NULL );
+      }
+      for( uint j = 1; j < this->req(); j++ ) {
+        Node *in = this->in(j);
+        if( in->is_Phi() && in->in(0) == region )
+          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
+      }
+    }
+    // Check for a 'win' on some paths
+    const Type *t = x->Value(igvn);
+
+    bool singleton = t->singleton();
+
+    // See comments in PhaseIdealLoop::split_thru_phi().
+    if( singleton && t == Type::TOP ) {
+      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
+    }
+
+    if( singleton ) {
+      wins++;
+      x = igvn->makecon(t);
+    } else {
+      // We now call Identity to try to simplify the cloned node.
+      // Note that some Identity methods call phase->type(this).
+      // Make sure that the type array is big enough for
+      // our new node, even though we may throw the node away.
+      // (This tweaking with igvn only works because x is a new node.)
+      igvn->set_type(x, t);
+      Node *y = x->Identity(igvn);
+      if( y != x ) {
+        wins++;
+        x = y;
+      } else {
+        y = igvn->hash_find(x);
+        if( y ) {
+          wins++;
+          x = y;
+        } else {
+          // Else x is a new node we are keeping
+          // We do not need register_new_node_with_optimizer
+          // because set_type has already been called.
+          igvn->_worklist.push(x);
+        }
+      }
+    }
+    if (x != the_clone && the_clone != NULL)
+      igvn->remove_dead_node(the_clone);
+    phi->set_req(i, x);
+  }
+  if( wins > 0 ) {
+    // Record Phi
+    igvn->register_new_node_with_optimizer(phi);
+    return phi;
+  }
+  igvn->remove_dead_node(phi);
+  return NULL;
+}
 
 //------------------------------Ideal------------------------------------------
 // If the load is from Field memory and the pointer is non-null, we can
 // zero out the control input.
 // If the offset is constant and the base is an object allocation,
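The new split_through_phi above follows the classic split-thru-phi shape: clone the load once per region predecessor, let each clone simplify on its own path, and keep the Phi of clones only if at least one path "wins" (folds to a constant or hits an existing node). A minimal self-contained sketch of that accounting over a toy two-point lattice (toy names and a toy op, not the C2 types):

    #include <cstdio>

    struct Val { bool is_con; int con; };   // toy lattice: constant or unknown

    static Val apply_op(Val x) {            // toy op f(x) = x + 1; folds on constants
      return x.is_con ? Val{true, x.con + 1} : Val{false, 0};
    }

    // Split f through phi(in1, in2): evaluate the clone on each path and
    // keep the split (mirroring 'wins > 0') only if some path folded.
    static bool split_f_through_phi(Val in1, Val in2, Val out[2]) {
      out[0] = apply_op(in1);
      out[1] = apply_op(in2);
      int wins = (out[0].is_con ? 1 : 0) + (out[1].is_con ? 1 : 0);
      return wins > 0;
    }

    int main() {
      Val out[2];
      bool keep = split_f_through_phi(Val{true, 41}, Val{false, 0}, out);
      printf("keep=%d path0=%d\n", keep, out[0].con);  // keep=1 path0=42
      return 0;
    }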
@@ -1173,116 +1302,13 @@
       return this;
     }
     const TypeOopPtr *t_oop = addr_t->isa_oopptr();
     if (can_reshape && opt_mem->is_Phi() &&
         (t_oop != NULL) && t_oop->is_instance_field()) {
-      assert(t_oop->offset() != Type::OffsetBot && t_oop->offset() != Type::OffsetTop, "");
-      Node *region = opt_mem->in(0);
-      uint cnt = opt_mem->req();
-      for( uint i = 1; i < cnt; i++ ) {
-        Node *in = opt_mem->in(i);
-        if( in == NULL ) {
-          region = NULL; // Wait stable graph
-          break;
-        }
-      }
-      if (region != NULL) {
-        // Check for loop invariant.
-        if (cnt == 3) {
-          for( uint i = 1; i < cnt; i++ ) {
-            Node *in = opt_mem->in(i);
-            Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
-            if (m == opt_mem) {
-              set_req(MemNode::Memory, opt_mem->in(cnt - i)); // Skip this phi.
-              return this;
-            }
-          }
-        }
-        // Split through Phi (see original code in loopopts.cpp).
-        assert(phase->C->have_alias_type(addr_t), "instance should have alias type");
-
-        // Do nothing here if Identity will find a value
-        // (to avoid infinite chain of value phis generation).
-        if ( !phase->eqv(this, this->Identity(phase)) )
-          return NULL;
-
-        const Type* this_type = this->bottom_type();
-        int this_index = phase->C->get_alias_index(addr_t);
-        int this_offset = addr_t->offset();
-        int this_iid = addr_t->is_oopptr()->instance_id();
-        int wins = 0;
-        PhaseIterGVN *igvn = phase->is_IterGVN();
-        Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
-        for( uint i = 1; i < region->req(); i++ ) {
-          Node *x;
-          Node* the_clone = NULL;
-          if( region->in(i) == phase->C->top() ) {
-            x = phase->C->top();      // Dead path?  Use a dead data op
-          } else {
-            x = this->clone();        // Else clone up the data op
-            the_clone = x;            // Remember for possible deletion.
-            // Alter data node to use pre-phi inputs
-            if( this->in(0) == region ) {
-              x->set_req( 0, region->in(i) );
-            } else {
-              x->set_req( 0, NULL );
-            }
-            for( uint j = 1; j < this->req(); j++ ) {
-              Node *in = this->in(j);
-              if( in->is_Phi() && in->in(0) == region )
-                x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
-            }
-          }
-          // Check for a 'win' on some paths
-          const Type *t = x->Value(igvn);
-
-          bool singleton = t->singleton();
-
-          // See comments in PhaseIdealLoop::split_thru_phi().
-          if( singleton && t == Type::TOP ) {
-            singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
-          }
-
-          if( singleton ) {
-            wins++;
-            x = igvn->makecon(t);
-          } else {
-            // We now call Identity to try to simplify the cloned node.
-            // Note that some Identity methods call phase->type(this).
-            // Make sure that the type array is big enough for
-            // our new node, even though we may throw the node away.
-            // (This tweaking with igvn only works because x is a new node.)
-            igvn->set_type(x, t);
-            Node *y = x->Identity(igvn);
-            if( y != x ) {
-              wins++;
-              x = y;
-            } else {
-              y = igvn->hash_find(x);
-              if( y ) {
-                wins++;
-                x = y;
-              } else {
-                // Else x is a new node we are keeping
-                // We do not need register_new_node_with_optimizer
-                // because set_type has already been called.
-                igvn->_worklist.push(x);
-              }
-            }
-          }
-          if (x != the_clone && the_clone != NULL)
-            igvn->remove_dead_node(the_clone);
-          phi->set_req(i, x);
-        }
-        if( wins > 0 ) {
-          // Record Phi
-          igvn->register_new_node_with_optimizer(phi);
-          return phi;
-        } else {
-          igvn->remove_dead_node(phi);
-        }
-      }
+      // Split instance field load through Phi.
+      Node* result = split_through_phi(phase);
+      if (result != NULL) return result;
     }
   }
 
   // Check for prior store with a different base or offset; make Load
   // independent. Skip through any number of them. Bail out if the stores
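The trailing context above introduces the next transform in the file: a load may walk past earlier stores whose base or offset provably cannot alias its own address. A source-level illustration of the offset case (hypothetical struct, not VM code):

    struct Pair { int x; int y; };

    int bypass(Pair* a, Pair* b) {
      a->x = 1;
      b->y = 2;      // different field offset: can never alias a->x
      return a->x;   // the load may bypass the store to b->y and see 1
    }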
@@ -1833,11 +1859,11 @@
   case T_FLOAT:   return new (C, 4) StoreFNode(ctl, mem, adr, adr_type, val);
   case T_DOUBLE:  return new (C, 4) StoreDNode(ctl, mem, adr, adr_type, val);
   case T_ADDRESS:
   case T_OBJECT:
 #ifdef _LP64
-    if (adr->bottom_type()->is_narrow() ||
+    if (adr->bottom_type()->is_ptr_to_narrowoop() ||
         (UseCompressedOops && val->bottom_type()->isa_klassptr() &&
          adr->bottom_type()->isa_rawptr())) {
       const TypePtr* type = val->bottom_type()->is_ptr();
       Node* cp = EncodePNode::encode(&gvn, val);
       return new (C, 4) StoreNNode(ctl, mem, adr, adr_type, cp);