26 #include "ci/bcEscapeAnalyzer.hpp" |
26 #include "ci/bcEscapeAnalyzer.hpp" |
27 #include "compiler/compileLog.hpp" |
27 #include "compiler/compileLog.hpp" |
28 #include "libadt/vectset.hpp" |
28 #include "libadt/vectset.hpp" |
29 #include "memory/allocation.hpp" |
29 #include "memory/allocation.hpp" |
30 #include "opto/c2compiler.hpp" |
30 #include "opto/c2compiler.hpp" |
|
31 #include "opto/arraycopynode.hpp" |
31 #include "opto/callnode.hpp" |
32 #include "opto/callnode.hpp" |
32 #include "opto/cfgnode.hpp" |
33 #include "opto/cfgnode.hpp" |
33 #include "opto/compile.hpp" |
34 #include "opto/compile.hpp" |
34 #include "opto/escape.hpp" |
35 #include "opto/escape.hpp" |
35 #include "opto/phaseX.hpp" |
36 #include "opto/phaseX.hpp" |
111 // Worklists used by EA. |
112 // Worklists used by EA. |
112 Unique_Node_List delayed_worklist; |
113 Unique_Node_List delayed_worklist; |
113 GrowableArray<Node*> alloc_worklist; |
114 GrowableArray<Node*> alloc_worklist; |
114 GrowableArray<Node*> ptr_cmp_worklist; |
115 GrowableArray<Node*> ptr_cmp_worklist; |
115 GrowableArray<Node*> storestore_worklist; |
116 GrowableArray<Node*> storestore_worklist; |
|
117 GrowableArray<ArrayCopyNode*> arraycopy_worklist; |
116 GrowableArray<PointsToNode*> ptnodes_worklist; |
118 GrowableArray<PointsToNode*> ptnodes_worklist; |
117 GrowableArray<JavaObjectNode*> java_objects_worklist; |
119 GrowableArray<JavaObjectNode*> java_objects_worklist; |
118 GrowableArray<JavaObjectNode*> non_escaped_worklist; |
120 GrowableArray<JavaObjectNode*> non_escaped_worklist; |
119 GrowableArray<FieldNode*> oop_fields_worklist; |
121 GrowableArray<FieldNode*> oop_fields_worklist; |
120 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; ) |
122 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; ) |
171 #ifdef ASSERT |
173 #ifdef ASSERT |
172 } else if (n->is_AddP()) { |
174 } else if (n->is_AddP()) { |
173 // Collect address nodes for graph verification. |
175 // Collect address nodes for graph verification. |
174 addp_worklist.append(n); |
176 addp_worklist.append(n); |
175 #endif |
177 #endif |
|
178 } else if (n->is_ArrayCopy()) { |
|
179 // Keep a list of ArrayCopy nodes so if one of its input is non |
|
180 // escaping, we can record a unique type |
|
181 arraycopy_worklist.append(n->as_ArrayCopy()); |
176 } |
182 } |
177 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { |
183 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { |
178 Node* m = n->fast_out(i); // Get user |
184 Node* m = n->fast_out(i); // Get user |
179 ideal_nodes.push(m); |
185 ideal_nodes.push(m); |
180 } |
186 } |
287 // 5. Separate memory graph for scalar replaceable allcations. |
293 // 5. Separate memory graph for scalar replaceable allocations. |
288 if (has_scalar_replaceable_candidates && |
294 if (has_scalar_replaceable_candidates && |
289 C->AliasLevel() >= 3 && EliminateAllocations) { |
295 C->AliasLevel() >= 3 && EliminateAllocations) { |
290 // Now use the escape information to create unique types for |
296 // Now use the escape information to create unique types for |
291 // scalar replaceable objects. |
297 // scalar replaceable objects. |
292 split_unique_types(alloc_worklist); |
298 split_unique_types(alloc_worklist, arraycopy_worklist); |
293 if (C->failing()) return false; |
299 if (C->failing()) return false; |
294 C->print_method(PHASE_AFTER_EA, 2); |
300 C->print_method(PHASE_AFTER_EA, 2); |
295 |
301 |
296 #ifdef ASSERT |
302 #ifdef ASSERT |
297 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { |
303 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { |
331 } |
337 } |
332 |
338 |
333 // Populate Connection Graph with PointsTo nodes and create simple |
339 // Populate Connection Graph with PointsTo nodes and create simple |
334 // connection graph edges. |
340 // connection graph edges. |
335 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { |
341 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { |
336 assert(!_verify, "this method sould not be called for verification"); |
342 assert(!_verify, "this method should not be called for verification"); |
337 PhaseGVN* igvn = _igvn; |
343 PhaseGVN* igvn = _igvn; |
338 uint n_idx = n->_idx; |
344 uint n_idx = n->_idx; |
339 PointsToNode* n_ptn = ptnode_adr(n_idx); |
345 PointsToNode* n_ptn = ptnode_adr(n_idx); |
340 if (n_ptn != NULL) |
346 if (n_ptn != NULL) |
341 return; // No need to redefine PointsTo node during first iteration. |
347 return; // No need to redefine PointsTo node during first iteration. |
899 case Op_CallLeafNoFP: |
905 case Op_CallLeafNoFP: |
900 // Most array copies are ArrayCopy nodes at this point but there |
906 // Most array copies are ArrayCopy nodes at this point but there |
901 // are still a few direct calls to the copy subroutines (See |
907 // are still a few direct calls to the copy subroutines (See |
902 // PhaseStringOpts::copy_string()) |
908 // PhaseStringOpts::copy_string()) |
903 is_arraycopy = (call->Opcode() == Op_ArrayCopy) || |
909 is_arraycopy = (call->Opcode() == Op_ArrayCopy) || |
904 (call->as_CallLeaf()->_name != NULL && |
910 call->as_CallLeaf()->is_call_to_arraycopystub(); |
905 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0); |
|
906 // fall through |
911 // fall through |
907 case Op_CallLeaf: { |
912 case Op_CallLeaf: { |
908 // Stub calls, objects do not escape but they are not scale replaceable. |
913 // Stub calls, objects do not escape but they are not scalar replaceable. |
909 // Adjust escape state for outgoing arguments. |
914 // Adjust escape state for outgoing arguments. |
910 const TypeTuple * d = call->tf()->domain(); |
915 const TypeTuple * d = call->tf()->domain(); |
978 // source object. |
983 // source object. |
979 if (arg_esc >= PointsToNode::ArgEscape && |
984 if (arg_esc >= PointsToNode::ArgEscape && |
980 !arg_is_arraycopy_dest) { |
985 !arg_is_arraycopy_dest) { |
981 continue; |
986 continue; |
982 } |
987 } |
983 set_escape_state(arg_ptn, PointsToNode::ArgEscape); |
988 PointsToNode::EscapeState es = PointsToNode::ArgEscape; |
|
989 if (call->is_ArrayCopy()) { |
|
990 ArrayCopyNode* ac = call->as_ArrayCopy(); |
|
991 if (ac->is_clonebasic() || |
|
992 ac->is_arraycopy_validated() || |
|
993 ac->is_copyof_validated() || |
|
994 ac->is_copyofrange_validated()) { |
|
995 es = PointsToNode::NoEscape; |
|
996 } |
|
997 } |
|
998 set_escape_state(arg_ptn, es); |
984 if (arg_is_arraycopy_dest) { |
999 if (arg_is_arraycopy_dest) { |
985 Node* src = call->in(TypeFunc::Parms); |
1000 Node* src = call->in(TypeFunc::Parms); |
986 if (src->is_AddP()) { |
1001 if (src->is_AddP()) { |
987 src = get_addp_base(src); |
1002 src = get_addp_base(src); |
988 } |
1003 } |
992 // Special arraycopy edge: |
1007 // Special arraycopy edge: |
993 // A destination object's field can't have the source object |
1008 // A destination object's field can't have the source object |
994 // as base since objects escape states are not related. |
1009 // as base since objects escape states are not related. |
995 // Only escape state of destination object's fields affects |
1010 // Only escape state of destination object's fields affects |
996 // escape state of fields in source object. |
1011 // escape state of fields in source object. |
997 add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn); |
1012 add_arraycopy(call, es, src_ptn, arg_ptn); |
998 } |
1013 } |
999 } |
1014 } |
1000 } |
1015 } |
1001 } |
1016 } |
1002 break; |
1017 break; |
1270 es_changed = true; |
1285 es_changed = true; |
1271 } |
1286 } |
1272 if ((e->escape_state() < field_es) && |
1287 if ((e->escape_state() < field_es) && |
1273 e->is_Field() && ptn->is_JavaObject() && |
1288 e->is_Field() && ptn->is_JavaObject() && |
1274 e->as_Field()->is_oop()) { |
1289 e->as_Field()->is_oop()) { |
1275 // Change escape state of referenced fileds. |
1290 // Change escape state of referenced fields. |
1276 set_escape_state(e, field_es); |
1291 set_escape_state(e, field_es); |
1277 es_changed = true;; |
1292 es_changed = true; |
1278 } else if (e->escape_state() < es) { |
1293 } else if (e->escape_state() < es) { |
1279 set_escape_state(e, es); |
1294 set_escape_state(e, es); |
1280 es_changed = true;; |
1295 es_changed = true; |
1281 } |
1296 } |
1282 if (es_changed) { |
1297 if (es_changed) { |
1283 escape_worklist.push(e); |
1298 escape_worklist.push(e); |
1284 } |
1299 } |
1285 } |
1300 } |
1387 PointsToNode* arycp = j.get(); |
1402 PointsToNode* arycp = j.get(); |
1388 if (arycp->is_Arraycopy()) { |
1403 if (arycp->is_Arraycopy()) { |
1389 for (UseIterator k(arycp); k.has_next(); k.next()) { |
1404 for (UseIterator k(arycp); k.has_next(); k.next()) { |
1390 PointsToNode* abase = k.get(); |
1405 PointsToNode* abase = k.get(); |
1391 if (abase->arraycopy_dst() && abase != base) { |
1406 if (abase->arraycopy_dst() && abase != base) { |
1392 // Look for the same arracopy reference. |
1407 // Look for the same arraycopy reference. |
1393 add_fields_to_worklist(field, abase); |
1408 add_fields_to_worklist(field, abase); |
1394 } |
1409 } |
1395 } |
1410 } |
1396 } |
1411 } |
1397 } |
1412 } |
1467 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) { |
1482 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) { |
1468 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); |
1483 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); |
1469 int new_edges = 0; |
1484 int new_edges = 0; |
1470 Node* alloc = pta->ideal_node(); |
1485 Node* alloc = pta->ideal_node(); |
1471 if (init_val == phantom_obj) { |
1486 if (init_val == phantom_obj) { |
1472 // Do nothing for Allocate nodes since its fields values are "known". |
1487 // Do nothing for Allocate nodes since its fields values are |
1473 if (alloc->is_Allocate()) |
1488 // "known" unless they are initialized by arraycopy/clone. |
|
1489 if (alloc->is_Allocate() && !pta->arraycopy_dst()) |
1474 return 0; |
1490 return 0; |
1475 assert(alloc->as_CallStaticJava(), "sanity"); |
1491 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity"); |
1476 #ifdef ASSERT |
1492 #ifdef ASSERT |
1477 if (alloc->as_CallStaticJava()->method() == NULL) { |
1493 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) { |
1478 const char* name = alloc->as_CallStaticJava()->_name; |
1494 const char* name = alloc->as_CallStaticJava()->_name; |
1479 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); |
1495 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); |
1480 } |
1496 } |
1481 #endif |
1497 #endif |
1482 // Non-escaped allocation returned from Java or runtime call have |
1498 // Non-escaped allocations returned from Java or runtime calls have |
1621 // 1. An object is not scalar replaceable if the field into which it is |
1637 // 1. An object is not scalar replaceable if the field into which it is |
1622 // stored has unknown offset (stored into unknown element of an array). |
1638 // stored has unknown offset (stored into unknown element of an array). |
1623 // |
1639 // |
1624 for (UseIterator i(jobj); i.has_next(); i.next()) { |
1640 for (UseIterator i(jobj); i.has_next(); i.next()) { |
1625 PointsToNode* use = i.get(); |
1641 PointsToNode* use = i.get(); |
1626 assert(!use->is_Arraycopy(), "sanity"); |
1642 if (use->is_Arraycopy()) { |
|
1643 continue; |
|
1644 } |
1627 if (use->is_Field()) { |
1645 if (use->is_Field()) { |
1628 FieldNode* field = use->as_Field(); |
1646 FieldNode* field = use->as_Field(); |
1629 assert(field->is_oop() && field->scalar_replaceable() && |
1647 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); |
1630 field->fields_escape_state() == PointsToNode::NoEscape, "sanity"); |
|
1631 if (field->offset() == Type::OffsetBot) { |
1648 if (field->offset() == Type::OffsetBot) { |
1632 jobj->set_scalar_replaceable(false); |
1649 jobj->set_scalar_replaceable(false); |
1633 return; |
1650 return; |
1634 } |
1651 } |
1635 // 2. An object is not scalar replaceable if the field into which it is |
1652 // 2. An object is not scalar replaceable if the field into which it is |
1658 return; |
1675 return; |
1659 } |
1676 } |
1660 } |
1677 } |
1661 |
1678 |
1662 for (EdgeIterator j(jobj); j.has_next(); j.next()) { |
1679 for (EdgeIterator j(jobj); j.has_next(); j.next()) { |
|
1680 if (j.get()->is_Arraycopy()) { |
|
1681 continue; |
|
1682 } |
|
1683 |
1663 // Non-escaping object node should point only to field nodes. |
1684 // Non-escaping object node should point only to field nodes. |
1664 FieldNode* field = j.get()->as_Field(); |
1685 FieldNode* field = j.get()->as_Field(); |
1665 int offset = field->as_Field()->offset(); |
1686 int offset = field->as_Field()->offset(); |
1666 |
1687 |
1667 // 4. An object is not scalar replaceable if it has a field with unknown |
1688 // 4. An object is not scalar replaceable if it has a field with unknown |
2634 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { |
2655 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { |
2635 Node *proj_in = result->in(0); |
2656 Node *proj_in = result->in(0); |
2636 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { |
2657 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { |
2637 break; // hit one of our sentinels |
2658 break; // hit one of our sentinels |
2638 } else if (proj_in->is_Call()) { |
2659 } else if (proj_in->is_Call()) { |
|
2660 // ArrayCopy node processed here as well |
2639 CallNode *call = proj_in->as_Call(); |
2661 CallNode *call = proj_in->as_Call(); |
2640 if (!call->may_modify(toop, igvn)) { |
2662 if (!call->may_modify(toop, igvn)) { |
2641 result = call->in(TypeFunc::Memory); |
2663 result = call->in(TypeFunc::Memory); |
2642 } |
2664 } |
2643 } else if (proj_in->is_Initialize()) { |
2665 } else if (proj_in->is_Initialize()) { |
2646 // which contains this memory slice, otherwise skip over it. |
2668 // which contains this memory slice, otherwise skip over it. |
2647 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { |
2669 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { |
2648 result = proj_in->in(TypeFunc::Memory); |
2670 result = proj_in->in(TypeFunc::Memory); |
2649 } |
2671 } |
2650 } else if (proj_in->is_MemBar()) { |
2672 } else if (proj_in->is_MemBar()) { |
|
2673 if (proj_in->in(TypeFunc::Memory)->is_MergeMem() && |
|
2674 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() && |
|
2675 proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) { |
|
2676 // clone |
|
2677 ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy(); |
|
2678 if (ac->may_modify(toop, igvn)) { |
|
2679 break; |
|
2680 } |
|
2681 } |
2651 result = proj_in->in(TypeFunc::Memory); |
2682 result = proj_in->in(TypeFunc::Memory); |
2652 } |
2683 } |
2653 } else if (result->is_MergeMem()) { |
2684 } else if (result->is_MergeMem()) { |
2654 MergeMemNode *mmem = result->as_MergeMem(); |
2685 MergeMemNode *mmem = result->as_MergeMem(); |
2655 result = step_through_mergemem(mmem, alias_idx, toop); |
2686 result = step_through_mergemem(mmem, alias_idx, toop); |
2722 // We start with allocations (and calls which may be allocations) on alloc_worklist. |
2753 // We start with allocations (and calls which may be allocations) on alloc_worklist. |
2723 // The processing is done in 4 phases: |
2754 // The processing is done in 4 phases: |
2724 // |
2755 // |
2725 // Phase 1: Process possible allocations from alloc_worklist. Create instance |
2756 // Phase 1: Process possible allocations from alloc_worklist. Create instance |
2726 // types for the CheckCastPP for allocations where possible. |
2757 // types for the CheckCastPP for allocations where possible. |
2727 // Propagate the the new types through users as follows: |
2758 // Propagate the new types through users as follows: |
2728 // casts and Phi: push users on alloc_worklist |
2759 // casts and Phi: push users on alloc_worklist |
2729 // AddP: cast Base and Address inputs to the instance type |
2760 // AddP: cast Base and Address inputs to the instance type |
2730 // push any AddP users on alloc_worklist and push any memnode |
2761 // push any AddP users on alloc_worklist and push any memnode |
2731 // users onto memnode_worklist. |
2762 // users onto memnode_worklist. |
2732 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and |
2763 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and |
2801 // 80 Phi 75 40 60 Memory alias_index=4 |
2832 // 80 Phi 75 40 60 Memory alias_index=4 |
2802 // 120 Phi 75 50 50 Memory alias_index=6 |
2833 // 120 Phi 75 50 50 Memory alias_index=6 |
2803 // 90 LoadP _ 120 30 ... alias_index=6 |
2834 // 90 LoadP _ 120 30 ... alias_index=6 |
2804 // 100 LoadP _ 80 20 ... alias_index=4 |
2835 // 100 LoadP _ 80 20 ... alias_index=4 |
2805 // |
2836 // |
2806 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) { |
2837 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) { |
2807 GrowableArray<Node *> memnode_worklist; |
2838 GrowableArray<Node *> memnode_worklist; |
2808 GrowableArray<PhiNode *> orig_phis; |
2839 GrowableArray<PhiNode *> orig_phis; |
2809 PhaseIterGVN *igvn = _igvn; |
2840 PhaseIterGVN *igvn = _igvn; |
2810 uint new_index_start = (uint) _compile->num_alias_types(); |
2841 uint new_index_start = (uint) _compile->num_alias_types(); |
2811 Arena* arena = Thread::current()->resource_area(); |
2842 Arena* arena = Thread::current()->resource_area(); |
2910 igvn->hash_insert(n); |
2941 igvn->hash_insert(n); |
2911 record_for_optimizer(n); |
2942 record_for_optimizer(n); |
2912 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { |
2943 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { |
2913 |
2944 |
2914 // First, put on the worklist all Field edges from Connection Graph |
2945 // First, put on the worklist all Field edges from Connection Graph |
2915 // which is more accurate then putting immediate users from Ideal Graph. |
2946 // which is more accurate than putting immediate users from Ideal Graph. |
2916 for (EdgeIterator e(ptn); e.has_next(); e.next()) { |
2947 for (EdgeIterator e(ptn); e.has_next(); e.next()) { |
2917 PointsToNode* tgt = e.get(); |
2948 PointsToNode* tgt = e.get(); |
|
2949 if (tgt->is_Arraycopy()) { |
|
2950 continue; |
|
2951 } |
2918 Node* use = tgt->ideal_node(); |
2952 Node* use = tgt->ideal_node(); |
2919 assert(tgt->is_Field() && use->is_AddP(), |
2953 assert(tgt->is_Field() && use->is_AddP(), |
2920 "only AddP nodes are Field edges in CG"); |
2954 "only AddP nodes are Field edges in CG"); |
2921 if (use->outcnt() > 0) { // Don't process dead nodes |
2955 if (use->outcnt() > 0) { // Don't process dead nodes |
2922 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); |
2956 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); |
3066 #endif |
3100 #endif |
3067 } |
3101 } |
3068 } |
3102 } |
3069 |
3103 |
3070 } |
3104 } |
|
3105 |
|
3106 // Go over all ArrayCopy nodes and if one of the inputs has a unique |
|
3107 // type, record it in the ArrayCopy node so we know what memory this |
|
3108 // node uses/modifies. |
|
3109 for (int next = 0; next < arraycopy_worklist.length(); next++) { |
|
3110 ArrayCopyNode* ac = arraycopy_worklist.at(next); |
|
3111 Node* dest = ac->in(ArrayCopyNode::Dest); |
|
3112 if (dest->is_AddP()) { |
|
3113 dest = get_addp_base(dest); |
|
3114 } |
|
3115 JavaObjectNode* jobj = unique_java_object(dest); |
|
3116 if (jobj != NULL) { |
|
3117 Node *base = get_map(jobj->idx()); |
|
3118 if (base != NULL) { |
|
3119 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); |
|
3120 ac->_dest_type = base_t; |
|
3121 } |
|
3122 } |
|
3123 Node* src = ac->in(ArrayCopyNode::Src); |
|
3124 if (src->is_AddP()) { |
|
3125 src = get_addp_base(src); |
|
3126 } |
|
3127 jobj = unique_java_object(src); |
|
3128 if (jobj != NULL) { |
|
3129 Node* base = get_map(jobj->idx()); |
|
3130 if (base != NULL) { |
|
3131 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); |
|
3132 ac->_src_type = base_t; |
|
3133 } |
|
3134 } |
|
3135 } |
|
3136 |
3071 // New alias types were created in split_AddP(). |
3137 // New alias types were created in split_AddP(). |
3072 uint new_index_end = (uint) _compile->num_alias_types(); |
3138 uint new_index_end = (uint) _compile->num_alias_types(); |
3073 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1"); |
3139 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1"); |
3074 |
3140 |
3075 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and |
3141 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and |