/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_memnode.cpp.incl"

static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);

//=============================================================================
uint MemNode::size_of() const { return sizeof(*this); }

const TypePtr *MemNode::adr_type() const {
  Node* adr = in(Address);
  const TypePtr* cross_check = NULL;
  DEBUG_ONLY(cross_check = _adr_type);
  return calculate_adr_type(adr->bottom_type(), cross_check);
}

#ifndef PRODUCT
void MemNode::dump_spec(outputStream *st) const {
  if (in(Address) == NULL)  return; // node is dead
#ifndef ASSERT
  // fake the missing field
  const TypePtr* _adr_type = NULL;
  if (in(Address) != NULL)
    _adr_type = in(Address)->bottom_type()->isa_ptr();
#endif
  dump_adr_type(this, _adr_type, st);

  Compile* C = Compile::current();
  if( C->alias_type(_adr_type)->is_volatile() )
    st->print(" Volatile!");
}

void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
  st->print(" @");
  if (adr_type == NULL) {
    st->print("NULL");
  } else {
    adr_type->dump_on(st);
    Compile* C = Compile::current();
    Compile::AliasType* atp = NULL;
    if (C->have_alias_type(adr_type))  atp = C->alias_type(adr_type);
    if (atp == NULL)
      st->print(", idx=?\?;");
    else if (atp->index() == Compile::AliasIdxBot)
      st->print(", idx=Bot;");
    else if (atp->index() == Compile::AliasIdxTop)
      st->print(", idx=Top;");
    else if (atp->index() == Compile::AliasIdxRaw)
      st->print(", idx=Raw;");
    else {
      ciField* field = atp->field();
      if (field) {
        st->print(", name=");
        field->print_name_on(st);
      }
      st->print(", idx=%d;", atp->index());
    }
  }
}

extern void print_alias_types();

#endif

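// Walk the memory chain for a known-instance field, skipping memory producers
// that provably do not affect this slice: calls that cannot modify t_adr,
// initializations of other instances, and membars.  Stops at the start memory
// or at the allocation that created this instance.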
Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
  const TypeOopPtr *tinst = t_adr->isa_oopptr();
  if (tinst == NULL || !tinst->is_known_instance_field())
    return mchain;  // don't try to optimize non-instance types
  uint instance_id = tinst->instance_id();
  Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = mchain;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(t_adr, phase)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if (alloc != NULL && alloc->_idx != instance_id) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        result = proj_in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (result->is_MergeMem()) {
      result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
    }
  }
  return result;
}

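// Like optimize_simple_memory_chain(), but when running under IterGVN a memory
// Phi that still covers a wider address type than this known instance may
// additionally be cloned for this exact instance via PhiNode::split_out_instance().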
Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
  const TypeOopPtr *t_oop = t_adr->isa_oopptr();
  bool is_instance = (t_oop != NULL) && t_oop->is_known_instance_field();
  PhaseIterGVN *igvn = phase->is_IterGVN();
  Node *result = mchain;
  result = optimize_simple_memory_chain(result, t_adr, phase);
  if (is_instance && igvn != NULL && result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
        t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
        t->is_oopptr()->cast_to_exactness(true)
         ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) {
      // clone the Phi with our address type
      result = mphi->split_out_instance(t_adr, igvn);
    } else {
      assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
    }
  }
  return result;
}

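// Return the memory slice of 'mmem' matching the alias category of 'tp',
// unless 'tp' is too general (AnyPtr base, or java.lang.Object with unknown
// offset) to name a single slice.  In debug builds, cross-check 'adr_check'
// against the alias index computed when the graph was built.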
static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
  uint alias_idx = phase->C->get_alias_index(tp);
  Node *mem = mmem;
#ifdef ASSERT
  {
    // Check that current type is consistent with the alias index used during graph construction
    assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
    bool consistent =  adr_check == NULL || adr_check->empty() ||
                       phase->C->must_alias(adr_check, alias_idx );
    // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
    if( !consistent && adr_check != NULL && !adr_check->empty() &&
        tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
        adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
        ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
          adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
          adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
      // don't assert if it is dead code.
      consistent = true;
    }
    if( !consistent ) {
      st->print("alias_idx==%d, adr_check==", alias_idx);
      if( adr_check == NULL ) {
        st->print("NULL");
      } else {
        adr_check->dump();
      }
      st->cr();
      print_alias_types();
      assert(consistent, "adr_check must match alias idx");
    }
  }
#endif
  // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet.  Do not do any
  // alias stuff with it any time soon.
  const TypeOopPtr *tinst = tp->isa_oopptr();
  if( tp->base() != Type::AnyPtr &&
      !(tinst &&
        tinst->klass()->is_java_lang_Object() &&
        tinst->offset() == Type::OffsetBot) ) {
    // compress paths and change unreachable cycles to TOP
    // If not, we can update the input infinitely along a MergeMem cycle
    // Equivalent code in PhiNode::Ideal
    Node* m  = phase->transform(mmem);
    // If transformed to a MergeMem, get the desired slice
    // Otherwise the returned node represents memory for every slice
    mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m;
    // Update input if it is progress over what we have now
  }
  return mem;
}

//--------------------------Ideal_common---------------------------------------
// Look for degenerate control and memory inputs.  Bypass MergeMem inputs.
// Unhook non-raw memories from complete (macro-expanded) initializations.
Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
  // If our control input is a dead region, kill all below the region
  Node *ctl = in(MemNode::Control);
  if (ctl && remove_dead_region(phase, can_reshape))
    return this;

  // Ignore if memory is dead, or self-loop
  Node *mem = in(MemNode::Memory);
  if( phase->type( mem ) == Type::TOP ) return NodeSentinel; // caller will return NULL
  assert( mem != this, "dead loop in MemNode::Ideal" );

  Node *address = in(MemNode::Address);
  const Type *t_adr = phase->type( address );
  if( t_adr == Type::TOP )              return NodeSentinel; // caller will return NULL

  // Avoid independent memory operations
  Node* old_mem = mem;

  // The code which unhooks non-raw memories from complete (macro-expanded)
  // initializations was removed. After macro-expansion all stores caught
  // by an Initialize node became raw stores and there is no information
  // which memory slices they modify. So it is unsafe to move any memory
  // operation above these stores. Also in most cases hooked non-raw memories
  // were already unhooked by using information from detect_ptr_independence()
  // and find_previous_store().

  if (mem->is_MergeMem()) {
    MergeMemNode* mmem = mem->as_MergeMem();
    const TypePtr *tp = t_adr->is_ptr();

    mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty);
  }

  if (mem != old_mem) {
    set_req(MemNode::Memory, mem);
    return this;
  }

  // let the subclass continue analyzing...
  return NULL;
}

// Helper function for proving some simple control dominations.
// Attempt to prove that all control inputs of 'dom' dominate 'sub'.
// Already assumes that 'dom' is available at 'sub', and that 'sub'
// is not a constant (dominated by the method's StartNode).
// Used by MemNode::find_previous_store to prove that the
// control input of a memory operation predates (dominates)
// an allocation it wants to look past.
bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
  if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top())
    return false; // Conservative answer for dead code

  // Check 'dom'. Skip Proj and CatchProj nodes.
  dom = dom->find_exact_control(dom);
  if (dom == NULL || dom->is_top())
    return false; // Conservative answer for dead code

  if (dom == sub) {
    // For the case when, for example, 'sub' is Initialize and the original
    // 'dom' is Proj node of the 'sub'.
    return false;
  }

  if (dom->is_Con() || dom->is_Start() || dom->is_Root() || dom == sub)
    return true;

  // 'dom' dominates 'sub' if its control edge and control edges
  // of all its inputs dominate or equal to sub's control edge.

  // Currently 'sub' is either Allocate, Initialize or Start nodes.
  // Or Region for the check in LoadNode::Ideal();
  // 'sub' should have sub->in(0) != NULL.
  assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
         sub->is_Region(), "expecting only these nodes");

  // Get control edge of 'sub'.
  Node* orig_sub = sub;
  sub = sub->find_exact_control(sub->in(0));
  if (sub == NULL || sub->is_top())
    return false; // Conservative answer for dead code

  assert(sub->is_CFG(), "expecting control");

  if (sub == dom)
    return true;

  if (sub->is_Start() || sub->is_Root())
    return false;

  {
    // Check all control edges of 'dom'.

    ResourceMark rm;
    Arena* arena = Thread::current()->resource_area();
    Node_List nlist(arena);
    Unique_Node_List dom_list(arena);

    dom_list.push(dom);
    bool only_dominating_controls = false;

    for (uint next = 0; next < dom_list.size(); next++) {
      Node* n = dom_list.at(next);
      if (n == orig_sub)
        return false; // One of dom's inputs dominated by sub.
      if (!n->is_CFG() && n->pinned()) {
        // Check only own control edge for pinned non-control nodes.
        n = n->find_exact_control(n->in(0));
        if (n == NULL || n->is_top())
          return false; // Conservative answer for dead code
        assert(n->is_CFG(), "expecting control");
        dom_list.push(n);
      } else if (n->is_Con() || n->is_Start() || n->is_Root()) {
        only_dominating_controls = true;
      } else if (n->is_CFG()) {
        if (n->dominates(sub, nlist))
          only_dominating_controls = true;
        else
          return false;
      } else {
        // First, own control edge.
        Node* m = n->find_exact_control(n->in(0));
        if (m != NULL) {
          if (m->is_top())
            return false; // Conservative answer for dead code
          dom_list.push(m);
        }
        // Now, the rest of edges.
        uint cnt = n->req();
        for (uint i = 1; i < cnt; i++) {
          m = n->find_exact_control(n->in(i));
          if (m == NULL || m->is_top())
            continue;
          dom_list.push(m);
        }
      }
    }
    return only_dominating_controls;
  }
}

//---------------------detect_ptr_independence---------------------------------
// Used by MemNode::find_previous_store to prove that two base
// pointers are never equal.
// The pointers are accompanied by their associated allocations,
// if any, which have been previously discovered by the caller.
bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase) {
  // Attempt to prove that these two pointers cannot be aliased.
  // They may both manifestly be allocations, and they should differ.
  // Or, if they are not both allocations, they can be distinct constants.
  // Otherwise, one is an allocation and the other a pre-existing value.
  if (a1 == NULL && a2 == NULL) {           // neither an allocation
    return (p1 != p2) && p1->is_Con() && p2->is_Con();
  } else if (a1 != NULL && a2 != NULL) {    // both allocations
    return (a1 != a2);
  } else if (a1 != NULL) {                  // one allocation a1
    // (Note:  p2->is_Con implies p2->in(0)->is_Root, which dominates.)
    return all_controls_dominate(p2, a1);
  } else { //(a2 != NULL)                   // one allocation a2
    return all_controls_dominate(p1, a2);
  }
  return false;
}


// The logic for reordering loads and stores uses four steps:
// (a) Walk carefully past stores and initializations which we
//     can prove are independent of this load.
// (b) Observe that the next memory state makes an exact match
//     with self (load or store), and locate the relevant store.
// (c) Ensure that, if we were to wire self directly to the store,
//     the optimizer would fold it up somehow.
// (d) Do the rewiring, and return, depending on some other part of
//     the optimizer to fold up the load.
// This routine handles steps (a) and (b).  Steps (c) and (d) are
// specific to loads and stores, so they are handled by the callers.
// (Currently, only LoadNode::Ideal has steps (c), (d).  More later.)
//
Node* MemNode::find_previous_store(PhaseTransform* phase) {
  Node*         ctrl   = in(MemNode::Control);
  Node*         adr    = in(MemNode::Address);
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot)
    return NULL;            // cannot unalias unless there are precise offsets

  const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr();

  intptr_t size_in_bytes = memory_size();

  Node* mem = in(MemNode::Memory);   // start searching here...

  int cnt = 50;             // Cycle limiter
  for (;;) {                // While we can dance past unrelated stores...
    if (--cnt < 0)  break;  // Caught in cycle or a complicated dance?

    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
      if (st_base == NULL)
        break;              // inscrutable pointer
      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue;         // (a) advance through independent store memory
        }
      }
      if (st_base != base &&
          detect_ptr_independence(base, alloc,
                                  st_base,
                                  AllocateNode::Ideal_allocation(st_base, phase),
                                  phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue;           // (a) advance through independent store memory
      }

      // (b) At this point, if the bases or offsets do not agree, we lose,
      // since we have not managed to prove 'this' and 'mem' independent.
      if (st_base == base && st_offset == offset) {
        return mem;         // let caller handle steps (c), (d)
      }

    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode*  st_alloc = st_init->allocation();
      if (st_alloc == NULL)
        break;              // something degenerated
      bool known_identical = false;
      bool known_independent = false;
      if (alloc == st_alloc)
        known_identical = true;
      else if (alloc != NULL)
        known_independent = true;
      else if (all_controls_dominate(this, st_alloc))
        known_independent = true;

      if (known_independent) {
        // The bases are provably independent: Either they are
        // manifestly distinct allocations, or else the control
        // of this load dominates the store's allocation.
        int alias_idx = phase->C->get_alias_index(adr_type());
        if (alias_idx == Compile::AliasIdxRaw) {
          mem = st_alloc->in(TypeFunc::Memory);
        } else {
          mem = st_init->memory(alias_idx);
        }
        continue;           // (a) advance through independent store memory
      }

      // (b) at this point, if we are not looking at a store initializing
      // the same allocation we are loading from, we lose.
      if (known_identical) {
        // From caller, can_see_stored_value will consult find_captured_store.
        return mem;         // let caller handle steps (c), (d)
      }

    } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
      // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
      if (mem->is_Proj() && mem->in(0)->is_Call()) {
        CallNode *call = mem->in(0)->as_Call();
        if (!call->may_modify(addr_t, phase)) {
          mem = call->in(TypeFunc::Memory);
          continue;         // (a) advance through independent call memory
        }
      } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
        mem = mem->in(0)->in(TypeFunc::Memory);
        continue;           // (a) advance through independent MemBar memory
      } else if (mem->is_MergeMem()) {
        int alias_idx = phase->C->get_alias_index(adr_type());
        mem = mem->as_MergeMem()->memory_at(alias_idx);
        continue;           // (a) advance through independent MergeMem memory
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return NULL;              // bail out
}

//----------------------calculate_adr_type-------------------------------------
// Helper function.  Notices when the given type of address hits top or bottom.
// Also, asserts a cross-check of the type against the expected address type.
const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) {
  if (t == Type::TOP)  return NULL; // does not touch memory any more?
#ifdef PRODUCT
  cross_check = NULL;
#else
  if (!VerifyAliases || is_error_reported() || Node::in_dump())  cross_check = NULL;
#endif
  const TypePtr* tp = t->isa_ptr();
  if (tp == NULL) {
    assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide");
    return TypePtr::BOTTOM;           // touches lots of memory
  } else {
#ifdef ASSERT
    // %%%% [phh] We don't check the alias index if cross_check is
    //            TypeRawPtr::BOTTOM.  Needs to be investigated.
    if (cross_check != NULL &&
        cross_check != TypePtr::BOTTOM &&
        cross_check != TypeRawPtr::BOTTOM) {
      // Recheck the alias index, to see if it has changed (due to a bug).
      Compile* C = Compile::current();
      assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
             "must stay in the original alias category");
      // The type of the address must be contained in the adr_type,
      // disregarding "null"-ness.
      // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
      const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
      assert(cross_check->meet(tp_notnull) == cross_check,
             "real address must not escape from expected memory type");
    }
#endif
    return tp;
  }
}

//------------------------adr_phi_is_loop_invariant----------------------------
// A helper function for Ideal_DU_postCCP to check if a Phi in a counted
// loop is loop invariant. Make a quick traversal of Phi and associated
// CastPP nodes, looking to see if they are a closed group within the loop.
bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) {
  // The idea is that the phi-nest must boil down to only CastPP nodes
  // with the same data. This implies that any path into the loop already
  // includes such a CastPP, and so the original cast, whatever its input,
  // must be covered by an equivalent cast, with an earlier control input.
  ResourceMark rm;

  // The loop entry input of the phi should be the unique dominating
  // node for every Phi/CastPP in the loop.
  Unique_Node_List closure;
  closure.push(adr_phi->in(LoopNode::EntryControl));

  // Add the phi node and the cast to the worklist.
  Unique_Node_List worklist;
  worklist.push(adr_phi);
  if( cast != NULL ){
    if( !cast->is_ConstraintCast() ) return false;
    worklist.push(cast);
  }

  // Begin recursive walk of phi nodes.
  while( worklist.size() ){
    // Take a node off the worklist
    Node *n = worklist.pop();
    if( !closure.member(n) ){
      // Add it to the closure.
      closure.push(n);
      // Make a sanity check to ensure we don't waste too much time here.
      if( closure.size() > 20) return false;
      // This node is OK if:
      //  - it is a cast of an identical value
      //  - or it is a phi node (then we add its inputs to the worklist)
      // Otherwise, the node is not OK, and we presume the cast is not invariant
      if( n->is_ConstraintCast() ){
        worklist.push(n->in(1));
      } else if( n->is_Phi() ) {
        for( uint i = 1; i < n->req(); i++ ) {
          worklist.push(n->in(i));
        }
      } else {
        return false;
      }
    }
  }

  // Quit when the worklist is empty, and we've found no offending nodes.
  return true;
}

598 |
//------------------------------Ideal_DU_postCCP------------------------------- |
|
599 |
// Find any cast-away of null-ness and keep its control. Null cast-aways are |
|
600 |
// going away in this pass and we need to make this memory op depend on the |
|
601 |
// gating null check. |
|
589 | 602 |
Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { |
603 |
return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address)); |
|
604 |
} |
|
1 | 605 |
|
606 |
// I tried to leave the CastPP's in. This makes the graph more accurate in |
|
607 |
// some sense; we get to keep around the knowledge that an oop is not-null |
|
608 |
// after some test. Alas, the CastPP's interfere with GVN (some values are |
|
609 |
// the regular oop, some are the CastPP of the oop, all merge at Phi's which |
|
610 |
// cannot collapse, etc). This cost us 10% on SpecJVM, even when I removed |
|
611 |
// some of the more trivial cases in the optimizer. Removing more useless |
|
612 |
// Phi's started allowing Loads to illegally float above null checks. I gave |
|
613 |
// up on this approach. CNC 10/20/2000 |
|
589 | 614 |
// This static method may be called not from MemNode (EncodePNode calls it).
// Only the control edge of the node 'n' might be updated.
Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
  Node *skipped_cast = NULL;
  // Need a null check?  Regular static accesses do not because they are
  // from constant addresses.  Array ops are gated by the range check (which
  // always includes a NULL check).  Just check field ops.
  if( n->in(MemNode::Control) == NULL ) {
    // Scan upwards for the highest location we can place this memory op.
    while( true ) {
      switch( adr->Opcode() ) {

      case Op_AddP:             // No change to NULL-ness, so peek thru AddP's
        adr = adr->in(AddPNode::Base);
        continue;

      case Op_DecodeN:          // No change to NULL-ness, so peek thru
        adr = adr->in(1);
        continue;

      case Op_CastPP:
        // If the CastPP is useless, just peek on through it.
        if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
          // Remember the cast that we've peeked through. If we peek
          // through more than one, then we end up remembering the highest
          // one, that is, if in a loop, the one closest to the top.
          skipped_cast = adr;
          adr = adr->in(1);
          continue;
        }
        // CastPP is going away in this pass!  We need this memory op to be
        // control-dependent on the test that is guarding the CastPP.
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;

      case Op_Phi:
        // Attempt to float above a Phi to some dominating point.
        if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) {
          // If we've already peeked through a Cast (which could have set the
          // control), we can't float above a Phi, because the skipped Cast
          // may not be loop invariant.
          if (adr_phi_is_loop_invariant(adr, skipped_cast)) {
            adr = adr->in(1);
            continue;
          }
        }

        // Intentional fallthrough!

        // No obvious dominating point.  The mem op is pinned below the Phi
        // by the Phi itself.  If the Phi goes away (no true value is merged)
        // then the mem op can float, but not indefinitely.  It must be pinned
        // behind the controls leading to the Phi.
      case Op_CheckCastPP:
        // These usually stick around to change address type, however a
        // useless one can be elided and we still need to pick up a control edge
        if (adr->in(0) == NULL) {
          // This CheckCastPP node has NO control and is likely useless. But we
          // need to check further up the ancestor chain for a control input to keep
          // the node in place. 4959717.
          skipped_cast = adr;
          adr = adr->in(1);
          continue;
        }
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;

        // List of "safe" opcodes; those that implicitly block the memory
        // op below any null check.
      case Op_CastX2P:          // no null checks on native pointers
      case Op_Parm:             // 'this' pointer is not null
      case Op_LoadP:            // Loading from within a klass
      case Op_LoadN:            // Loading from within a klass
      case Op_LoadKlass:        // Loading from within a klass
      case Op_LoadNKlass:       // Loading from within a klass
      case Op_ConP:             // Loading from a klass
      case Op_ConN:             // Loading from a klass
      case Op_CreateEx:         // Sucking up the guts of an exception oop
      case Op_Con:              // Reading from TLS
      case Op_CMoveP:           // CMoveP is pinned
      case Op_CMoveN:           // CMoveN is pinned
        break;                  // No progress

      case Op_Proj:             // Direct call to an allocation routine
      case Op_SCMemProj:        // Memory state from store conditional ops
#ifdef ASSERT
        {
          assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value");
          const Node* call = adr->in(0);
          if (call->is_CallJava()) {
            const CallJavaNode* call_java = call->as_CallJava();
            const TypeTuple *r = call_java->tf()->range();
            assert(r->cnt() > TypeFunc::Parms, "must return value");
            const Type* ret_type = r->field_at(TypeFunc::Parms);
            assert(ret_type && ret_type->isa_ptr(), "must return pointer");
            // We further presume that this is one of
            // new_instance_Java, new_array_Java, or
            // the like, but do not assert for this.
          } else if (call->is_Allocate()) {
            // similar case to new_instance_Java, etc.
          } else if (!call->is_CallLeaf()) {
            // Projections from fetch_oop (OSR) are allowed as well.
            ShouldNotReachHere();
          }
        }
#endif
        break;
      default:
        ShouldNotReachHere();
      }
      break;
    }
  }

  return NULL;              // No progress
}
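// Illustrative sketch (added commentary, not part of the original logic, and
// the concrete offset below is made up): for a field load such as
//   LoadI(ctrl, mem, AddP(CastPP(obj), CastPP(obj), #12))
// the walk above peeks through the AddP and a useless CastPP; if the CastPP
// is about to be removed by CCP, the load is re-pinned on the test guarding
// that CastPP so it cannot float above the null check it depended on.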

//=============================================================================
uint LoadNode::size_of() const { return sizeof(*this); }
uint LoadNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((LoadNode&)n)._type ); }
const Type *LoadNode::bottom_type() const { return _type; }
uint LoadNode::ideal_reg() const {
  return Matcher::base2reg[_type->base()];
}

#ifndef PRODUCT
void LoadNode::dump_spec(outputStream *st) const {
  MemNode::dump_spec(st);
  if( !Verbose && !WizardMode ) {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }
}
#endif


//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
  Compile* C = gvn.C;

  // sanity check the alias category against the created node type
  assert(!(adr_type->isa_oopptr() &&
           adr_type->offset() == oopDesc::klass_offset_in_bytes()),
         "use LoadKlassNode instead");
  assert(!(adr_type->isa_aryptr() &&
           adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
         "use LoadRangeNode instead");
  switch (bt) {
  case T_BOOLEAN:
  case T_BYTE:    return new (C, 3) LoadBNode(ctl, mem, adr, adr_type, rt->is_int()    );
  case T_INT:     return new (C, 3) LoadINode(ctl, mem, adr, adr_type, rt->is_int()    );
  case T_CHAR:    return new (C, 3) LoadCNode(ctl, mem, adr, adr_type, rt->is_int()    );
  case T_SHORT:   return new (C, 3) LoadSNode(ctl, mem, adr, adr_type, rt->is_int()    );
  case T_LONG:    return new (C, 3) LoadLNode(ctl, mem, adr, adr_type, rt->is_long()   );
  case T_FLOAT:   return new (C, 3) LoadFNode(ctl, mem, adr, adr_type, rt              );
  case T_DOUBLE:  return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt              );
  case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr()    );
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* load  = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop()));
      return new (C, 2) DecodeNNode(load, load->bottom_type()->make_ptr());
    } else
#endif
    {
      assert(!adr->bottom_type()->is_ptr_to_narrowoop(), "should have got back a narrow oop");
      return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
    }
  }
  ShouldNotReachHere();
  return (LoadNode*)NULL;
}
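// Illustrative sketch (added commentary, not part of the original source; the
// address-building helper named here is hypothetical): a typical caller builds
// the address first and then asks the factory for a load of the matching type:
//   Node* adr = make_field_address(obj, field_offset);   // hypothetical helper
//   Node* ld  = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT);
//   ld        = gvn.transform(ld);
// On 64-bit VMs with compressed oops, the T_OBJECT case above silently expands
// into a LoadN feeding a DecodeN, so callers always receive a regular oop node.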

LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt) {
  bool require_atomic = true;
  return new (C, 3) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic);
}




//------------------------------hash-------------------------------------------
uint LoadNode::hash() const {
  // unroll addition of interesting fields
  return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
}

//---------------------------can_see_stored_value------------------------------
// This routine exists to make sure this set of tests is done the same
// everywhere.  We need to make a coordinated change: first LoadNode::Ideal
// will change the graph shape in a way which makes memory alive twice at the
// same time (uses the Oracle model of aliasing), then some
// LoadXNode::Identity will fold things back to the equivalence-class model
// of aliasing.
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
  Node* ld_adr = in(MemNode::Address);

  const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
  Compile::AliasType* atp = tp != NULL ? phase->C->alias_type(tp) : NULL;
  if (EliminateAutoBox && atp != NULL && atp->index() >= Compile::AliasIdxRaw &&
      atp->field() != NULL && !atp->field()->is_volatile()) {
    uint alias_idx = atp->index();
    bool final = atp->field()->is_final();
    Node* result = NULL;
    Node* current = st;
    // Skip through chains of MemBarNodes checking the MergeMems for
    // new states for the slice of this load.  Stop once any other
    // kind of node is encountered.  Loads from final memory can skip
    // through any kind of MemBar but normal loads shouldn't skip
    // through MemBarAcquire since that could allow them to move out of
    // a synchronized region.
    while (current->is_Proj()) {
      int opc = current->in(0)->Opcode();
      if ((final && opc == Op_MemBarAcquire) ||
          opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder) {
        Node* mem = current->in(0)->in(TypeFunc::Memory);
        if (mem->is_MergeMem()) {
          MergeMemNode* merge = mem->as_MergeMem();
          Node* new_st = merge->memory_at(alias_idx);
          if (new_st == merge->base_memory()) {
            // Keep searching
            current = merge->base_memory();
            continue;
          }
          // Save the new memory state for the slice and fall through
          // to exit.
          result = new_st;
        }
      }
      break;
    }
    if (result != NULL) {
      st = result;
    }
  }


  // Loop around twice in the case Load -> Initialize -> Store.
  // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
  for (int trip = 0; trip <= 1; trip++) {

    if (st->is_Store()) {
      Node* st_adr = st->in(MemNode::Address);
      if (!phase->eqv(st_adr, ld_adr)) {
        // Try harder before giving up...  Match raw and non-raw pointers.
        intptr_t st_off = 0;
        AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
        if (alloc == NULL)       return NULL;
        intptr_t ld_off = 0;
        AllocateNode* allo2 = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
        if (alloc != allo2)      return NULL;
        if (ld_off != st_off)    return NULL;
        // At this point we have proven something like this setup:
        //   A = Allocate(...)
        //   L = LoadQ(,  AddP(CastPP(, A.Parm),, #Off))
        //   S = StoreQ(, AddP(,        A.Parm  , #Off), V)
        // (Actually, we haven't yet proven the Q's are the same.)
        // In other words, we are loading from a casted version of
        // the same pointer-and-offset that we stored to.
        // Thus, we are able to replace L by V.
      }
      // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
      if (store_Opcode() != st->Opcode())
        return NULL;
      return st->in(MemNode::ValueIn);
    }

    intptr_t offset = 0;  // scratch

    // A load from a freshly-created object always returns zero.
    // (This can happen after LoadNode::Ideal resets the load's memory input
    // to find_captured_store, which returned InitializeNode::zero_memory.)
    if (st->is_Proj() && st->in(0)->is_Allocate() &&
        st->in(0) == AllocateNode::Ideal_allocation(ld_adr, phase, offset) &&
        offset >= st->in(0)->as_Allocate()->minimum_header_size()) {
      // return a zero value for the load's basic type
      // (This is one of the few places where a generic PhaseTransform
      // can create new nodes.  Think of it as lazily manifesting
      // virtually pre-existing constants.)
      return phase->zerocon(memory_type());
    }

    // A load from an initialization barrier can match a captured store.
    if (st->is_Proj() && st->in(0)->is_Initialize()) {
      InitializeNode* init = st->in(0)->as_Initialize();
      AllocateNode* alloc = init->allocation();
      if (alloc != NULL &&
          alloc == AllocateNode::Ideal_allocation(ld_adr, phase, offset)) {
        // examine a captured store value
        st = init->find_captured_store(offset, memory_size(), phase);
        if (st != NULL)
          continue;             // take one more trip around
      }
    }

    break;
  }

  return NULL;
}
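// Illustrative sketch (added commentary, not part of the original source):
// given the sequence
//   mem2 = StoreI(ctl, mem1, adr, v)
//   x    = LoadI (ctl, mem2, adr)
// can_see_stored_value(mem2) returns v, so LoadNode::Identity can replace the
// load with v outright.  The two-trip loop above additionally covers the case
// where the store has been captured inside the InitializeNode of a fresh
// allocation (Load -> Initialize -> Store).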

//----------------------is_instance_field_load_with_local_phi------------------
bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
  if( in(MemNode::Memory)->is_Phi() && in(MemNode::Memory)->in(0) == ctrl &&
      in(MemNode::Address)->is_AddP() ) {
    const TypeOopPtr* t_oop = in(MemNode::Address)->bottom_type()->isa_oopptr();
    // Only instances.
    if( t_oop != NULL && t_oop->is_known_instance_field() &&
        t_oop->offset() != Type::OffsetBot &&
        t_oop->offset() != Type::OffsetTop) {
      return true;
    }
  }
  return false;
}

//------------------------------Identity---------------------------------------
// Loads are identity if previous store is to same address
Node *LoadNode::Identity( PhaseTransform *phase ) {
  // If the previous store-maker is the right kind of Store, and the store is
  // to the same address, then we are equal to the value stored.
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if( value ) {
    // byte, short & char stores truncate naturally.
    // A load has to load the truncated value which requires
    // some sort of masking operation and that requires an
    // Ideal call instead of an Identity call.
    if (memory_size() < BytesPerInt) {
      // If the input to the store does not fit with the load's result type,
      // it must be truncated via an Ideal call.
      if (!phase->type(value)->higher_equal(phase->type(this)))
        return this;
    }
    // (This works even when value is a Con, but LoadNode::Value
    // usually runs first, producing the singleton type of the Con.)
    return value;
  }

  // Search for an existing data phi which was generated before for the same
  // instance's field to avoid infinite generation of phis in a loop.
  Node *region = mem->in(0);
  if (is_instance_field_load_with_local_phi(region)) {
    const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr();
    int this_index  = phase->C->get_alias_index(addr_t);
    int this_offset = addr_t->offset();
    int this_id = addr_t->is_oopptr()->instance_id();
    const Type* this_type = bottom_type();
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() && phi != mem &&
          phi->as_Phi()->is_same_inst_field(this_type, this_id, this_index, this_offset)) {
        return phi;
      }
    }
  }

  return this;
}
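// Illustrative sketch (added commentary, not part of the original source):
// for a known-instance field loaded inside a loop, split_through_phi (below)
// may already have produced a data Phi such as
//   phi = Phi(region, LoadI(mem1, adr), LoadI(mem2, adr))
// tagged with the same (instance_id, alias_index, offset).  Identity reuses
// that Phi instead of letting each IGVN pass manufacture a fresh one, which
// is what keeps the phi-splitting from generating phis without bound.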


// Returns true if the AliasType refers to the field that holds the
// cached box array.  Currently only handles the IntegerCache case.
static bool is_autobox_cache(Compile::AliasType* atp) {
  if (atp != NULL && atp->field() != NULL) {
    ciField* field = atp->field();
    ciSymbol* klass = field->holder()->name();
    if (field->name() == ciSymbol::cache_field_name() &&
        field->holder()->uses_default_loader() &&
        klass == ciSymbol::java_lang_Integer_IntegerCache()) {
      return true;
    }
  }
  return false;
}

// Fetch the base value in the autobox array
static bool fetch_autobox_base(Compile::AliasType* atp, int& cache_offset) {
  if (atp != NULL && atp->field() != NULL) {
    ciField* field = atp->field();
    ciSymbol* klass = field->holder()->name();
    if (field->name() == ciSymbol::cache_field_name() &&
        field->holder()->uses_default_loader() &&
        klass == ciSymbol::java_lang_Integer_IntegerCache()) {
      assert(field->is_constant(), "what?");
      ciObjArray* array = field->constant_value().as_object()->as_obj_array();
      // Fetch the box object at the base of the array and get its value
      ciInstance* box = array->obj_at(0)->as_instance();
      ciInstanceKlass* ik = box->klass()->as_instance_klass();
      if (ik->nof_nonstatic_fields() == 1) {
        // This should be true; nonstatic_field_at requires calling
        // nof_nonstatic_fields, so check it anyway.
        ciConstant c = box->field_value(ik->nonstatic_field_at(0));
        cache_offset = c.as_int();
      }
      return true;
    }
  }
  return false;
}

// Returns true if the AliasType refers to the value field of an
// autobox object.  Currently only handles Integer.
static bool is_autobox_object(Compile::AliasType* atp) {
  if (atp != NULL && atp->field() != NULL) {
    ciField* field = atp->field();
    ciSymbol* klass = field->holder()->name();
    if (field->name() == ciSymbol::value_name() &&
        field->holder()->uses_default_loader() &&
        klass == ciSymbol::java_lang_Integer()) {
      return true;
    }
  }
  return false;
}
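// Illustrative sketch (added commentary, not part of the original source):
// the Java library shape these helpers pattern-match against is roughly
//   class Integer { private final int value; ...
//     static class IntegerCache { static final Integer cache[]; ... } }
// so Integer.IntegerCache.cache[0].value holds the smallest cached value
// (normally -128), which fetch_autobox_base() reports through cache_offset.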


// We're loading from an object which has autobox behaviour.
// If this object is result of a valueOf call we'll have a phi
// merging a newly allocated object and a load from the cache.
// We want to replace this load with the original incoming
// argument to the valueOf call.
Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
  Node* base = in(Address)->in(AddPNode::Base);
  if (base->is_Phi() && base->req() == 3) {
    AllocateNode* allocation = NULL;
    int allocation_index = -1;
    int load_index = -1;
    for (uint i = 1; i < base->req(); i++) {
      allocation = AllocateNode::Ideal_allocation(base->in(i), phase);
      if (allocation != NULL) {
        allocation_index = i;
        load_index = 3 - allocation_index;
        break;
      }
    }
    LoadNode* load = NULL;
    if (allocation != NULL && base->in(load_index)->is_Load()) {
      load = base->in(load_index)->as_Load();
    }
    if (load != NULL && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) {
      // Push the loads from the phi that comes from valueOf up
      // through it to allow elimination of the loads and the recovery
      // of the original value.
      Node* mem_phi = in(Memory);
      Node* offset = in(Address)->in(AddPNode::Offset);

      Node* in1 = clone();
      Node* in1_addr = in1->in(Address)->clone();
      in1_addr->set_req(AddPNode::Base, base->in(allocation_index));
      in1_addr->set_req(AddPNode::Address, base->in(allocation_index));
      in1_addr->set_req(AddPNode::Offset, offset);
      in1->set_req(0, base->in(allocation_index));
      in1->set_req(Address, in1_addr);
      in1->set_req(Memory, mem_phi->in(allocation_index));

      Node* in2 = clone();
      Node* in2_addr = in2->in(Address)->clone();
      in2_addr->set_req(AddPNode::Base, base->in(load_index));
      in2_addr->set_req(AddPNode::Address, base->in(load_index));
      in2_addr->set_req(AddPNode::Offset, offset);
      in2->set_req(0, base->in(load_index));
      in2->set_req(Address, in2_addr);
      in2->set_req(Memory, mem_phi->in(load_index));

      in1_addr = phase->transform(in1_addr);
      in1 =      phase->transform(in1);
      in2_addr = phase->transform(in2_addr);
      in2 =      phase->transform(in2);

      PhiNode* result = PhiNode::make_blank(base->in(0), this);
      result->set_req(allocation_index, in1);
      result->set_req(load_index, in2);
      return result;
    }
  } else if (base->is_Load()) {
    // Eliminate the load of Integer.value for integers from the cache
    // array by deriving the value from the index into the array.
    // Capture the offset of the load and then reverse the computation.
    Node* load_base = base->in(Address)->in(AddPNode::Base);
    if (load_base != NULL) {
      Compile::AliasType* atp = phase->C->alias_type(load_base->adr_type());
      intptr_t cache_offset;
      int shift = -1;
      Node* cache = NULL;
      if (is_autobox_cache(atp)) {
        shift  = exact_log2(type2aelembytes(T_OBJECT));
        cache = AddPNode::Ideal_base_and_offset(load_base->in(Address), phase, cache_offset);
      }
      if (cache != NULL && base->in(Address)->is_AddP()) {
        Node* elements[4];
        int count = base->in(Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
        int cache_low;
        if (count > 0 && fetch_autobox_base(atp, cache_low)) {
          int offset = arrayOopDesc::base_offset_in_bytes(memory_type()) - (cache_low << shift);
          // Add up all the offsets making up the address of the load
          Node* result = elements[0];
          for (int i = 1; i < count; i++) {
            result = phase->transform(new (phase->C, 3) AddXNode(result, elements[i]));
          }
          // Remove the constant offset from the address and then
          // remove the scaling of the offset to recover the original index.
          result = phase->transform(new (phase->C, 3) AddXNode(result, phase->MakeConX(-offset)));
          if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
            // Peel the shift off directly but wrap it in a dummy node
            // since Ideal can't return existing nodes
            result = new (phase->C, 3) RShiftXNode(result->in(1), phase->intcon(0));
          } else {
            result = new (phase->C, 3) RShiftXNode(result, phase->intcon(shift));
          }
#ifdef _LP64
          result = new (phase->C, 2) ConvL2INode(phase->transform(result));
#endif
          return result;
        }
      }
    }
  }
  return NULL;
}
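// Illustrative sketch (added commentary, not part of the original source):
// roughly, a load of cache[i].value sits at an address of the form
//   cache + array_base_offset + (i << shift)
// With cache_low the smallest boxed value, subtracting
//   array_base_offset - (cache_low << shift)
// from the non-base part of that address and shifting right by 'shift'
// recovers i + cache_low, i.e. the unboxed int the eliminated load of
// Integer.value would have produced.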
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1141 |
|
589 | 1142 |
//------------------------------split_through_phi------------------------------ |
1143 |
// Split instance field load through Phi. |
|
1144 |
Node *LoadNode::split_through_phi(PhaseGVN *phase) { |
|
1145 |
Node* mem = in(MemNode::Memory); |
|
1146 |
Node* address = in(MemNode::Address); |
|
1147 |
const TypePtr *addr_t = phase->type(address)->isa_ptr(); |
|
1148 |
const TypeOopPtr *t_oop = addr_t->isa_oopptr(); |
|
1149 |
||
1150 |
assert(mem->is_Phi() && (t_oop != NULL) && |
|
769 | 1151 |
t_oop->is_known_instance_field(), "invalide conditions"); |
589 | 1152 |
|
1153 |
Node *region = mem->in(0); |
|
1154 |
if (region == NULL) { |
|
1155 |
return NULL; // Wait stable graph |
|
1156 |
} |
|
1157 |
uint cnt = mem->req(); |
|
1158 |
for( uint i = 1; i < cnt; i++ ) { |
|
1159 |
Node *in = mem->in(i); |
|
1160 |
if( in == NULL ) { |
|
1161 |
return NULL; // Wait stable graph |
|
1162 |
} |
|
1163 |
} |
|
1164 |
// Check for loop invariant. |
|
1165 |
if (cnt == 3) { |
|
1166 |
for( uint i = 1; i < cnt; i++ ) { |
|
1167 |
Node *in = mem->in(i); |
|
1168 |
Node* m = MemNode::optimize_memory_chain(in, addr_t, phase); |
|
1169 |
      if (m == mem) {
        set_req(MemNode::Memory, mem->in(cnt - i)); // Skip this phi.
        return this;
      }
    }
  }
  // Split through Phi (see original code in loopopts.cpp).
  assert(phase->C->have_alias_type(addr_t), "instance should have alias type");

  // Do nothing here if Identity will find a value
  // (to avoid infinite chain of value phis generation).
  if ( !phase->eqv(this, this->Identity(phase)) )
    return NULL;

  // Skip the split if the region dominates some control edge of the address.
  if (cnt == 3 && !MemNode::all_controls_dominate(address, region))
    return NULL;

  const Type* this_type = this->bottom_type();
  int this_index  = phase->C->get_alias_index(addr_t);
  int this_offset = addr_t->offset();
  int this_iid    = addr_t->is_oopptr()->instance_id();
  int wins = 0;
  PhaseIterGVN *igvn = phase->is_IterGVN();
  Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
  for( uint i = 1; i < region->req(); i++ ) {
    Node *x;
    Node* the_clone = NULL;
    if( region->in(i) == phase->C->top() ) {
      x = phase->C->top();      // Dead path?  Use a dead data op
    } else {
      x = this->clone();        // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if( this->in(0) == region ) {
        x->set_req( 0, region->in(i) );
      } else {
        x->set_req( 0, NULL );
      }
      for( uint j = 1; j < this->req(); j++ ) {
        Node *in = this->in(j);
        if( in->is_Phi() && in->in(0) == region )
          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type *t = x->Value(igvn);

    bool singleton = t->singleton();

    // See comments in PhaseIdealLoop::split_thru_phi().
    if( singleton && t == Type::TOP ) {
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if( singleton ) {
      wins++;
      x = igvn->makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (This tweaking with igvn only works because x is a new node.)
      igvn->set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node,
      // otherwise it will not be updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      Node *y = x->Identity(igvn);
      if( y != x ) {
        wins++;
        x = y;
      } else {
        y = igvn->hash_find(x);
        if( y ) {
          wins++;
          x = y;
        } else {
          // Else x is a new node we are keeping
          // We do not need register_new_node_with_optimizer
          // because set_type has already been called.
          igvn->_worklist.push(x);
        }
      }
    }
    if (x != the_clone && the_clone != NULL)
      igvn->remove_dead_node(the_clone);
    phi->set_req(i, x);
  }
  if( wins > 0 ) {
    // Record Phi
    igvn->register_new_node_with_optimizer(phi);
    return phi;
  }
  igvn->remove_dead_node(phi);
  return NULL;
}
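
// Illustrative sketch (not part of the original source): split_through_phi
// rewrites a load whose memory input is a Phi over an instance field's memory,
//   LoadI(mem: Phi(region, m1, m2), adr)
// into a value Phi whose inputs are per-path loads,
//   Phi(region, LoadI(m1, adr), LoadI(m2, adr))
// and only keeps the result when at least one per-path load simplifies
// (constant-folds, simplifies via Identity, or is commoned), i.e. when
// wins > 0 above; otherwise the speculative Phi is removed again.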

//------------------------------Ideal------------------------------------------
// If the load is from Field memory and the pointer is non-null, we can
// zero out the control input.
// If the offset is constant and the base is an object allocation,
// try to hook me up to the exact initializing store.
Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p)  return (p == NodeSentinel) ? NULL : p;

  Node* ctrl    = in(MemNode::Control);
  Node* address = in(MemNode::Address);

  // Skip up past a SafePoint control.  Cannot do this for Stores because
  // pointer stores & cardmarks must stay on the same side of a SafePoint.
  if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
      phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
    ctrl = ctrl->in(0);
    set_req(MemNode::Control,ctrl);
  }

  // Check for useless control edge in some common special cases
  if (in(MemNode::Control) != NULL) {
    intptr_t ignore = 0;
    Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
    if (base != NULL
        && phase->type(base)->higher_equal(TypePtr::NOTNULL)
        && all_controls_dominate(base, phase->C->start())) {
      // A method-invariant, non-null address (constant or 'this' argument).
      set_req(MemNode::Control, NULL);
    }
  }

  if (EliminateAutoBox && can_reshape && in(Address)->is_AddP()) {
    Node* base = in(Address)->in(AddPNode::Base);
    if (base != NULL) {
      Compile::AliasType* atp = phase->C->alias_type(adr_type());
      if (is_autobox_object(atp)) {
        Node* result = eliminate_autobox(phase);
        if (result != NULL) return result;
      }
    }
  }

  Node* mem = in(MemNode::Memory);
  const TypePtr *addr_t = phase->type(address)->isa_ptr();

  if (addr_t != NULL) {
    // try to optimize our memory input
    Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, phase);
    if (opt_mem != mem) {
      set_req(MemNode::Memory, opt_mem);
      return this;
    }
    const TypeOopPtr *t_oop = addr_t->isa_oopptr();
    if (can_reshape && opt_mem->is_Phi() &&
        (t_oop != NULL) && t_oop->is_known_instance_field()) {
      // Split instance field load through Phi.
      Node* result = split_through_phi(phase);
      if (result != NULL) return result;
    }
  }

  // Check for prior store with a different base or offset; make Load
  // independent.  Skip through any number of them.  Bail out if the stores
  // are in an endless dead cycle and report no progress.  This is a key
  // transform for Reflection.  However, if after skipping through the Stores
  // we can't then fold up against a prior store do NOT do the transform as
  // this amounts to using the 'Oracle' model of aliasing.  It leaves the same
  // array memory alive twice: once for the hoisted Load and again after the
  // bypassed Store.  This situation only works if EVERYBODY who does
  // anti-dependence work knows how to bypass.  I.e. we need all
  // anti-dependence checks to ask the same Oracle.  Right now, that Oracle is
  // the alias index stuff.  So instead, peek through Stores and IFF we can
  // fold up, do so.
  Node* prev_mem = find_previous_store(phase);
  // Steps (a), (b):  Walk past independent stores to find an exact match.
  if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) {
    // (c) See if we can fold up on the spot, but don't fold up here.
    // Fold-up might require truncation (for LoadB/LoadS/LoadC) or
    // just return a prior value, which is done by Identity calls.
    if (can_see_stored_value(prev_mem, phase)) {
      // Make ready for step (d):
      set_req(MemNode::Memory, prev_mem);
      return this;
    }
  }

  return NULL;                  // No further progress
}
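
// Illustrative sketch (not part of the original source): steps (a)-(d) above
// let a load skip independent prior stores.  For example, with
//   mem = StoreI(m0, p.a, 17);   this = LoadI(mem, q.b)
// where p.a and q.b have a different base or offset, find_previous_store()
// returns m0, can_see_stored_value() decides whether the load could then fold
// against a matching store, and only in that case is the load rewired to m0 so
// Identity/Value can finish the fold on a later pass.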

// Helper to recognize certain Klass fields which are invariant across
// some group of array types (e.g., int[] or all T[] where T < Object).
const Type*
LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
                                 ciKlass* klass) const {
  if (tkls->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc)) {
    // The field is Klass::_modifier_flags.  Return its (constant) value.
    // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
    assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
    return TypeInt::make(klass->modifier_flags());
  }
  if (tkls->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc)) {
    // The field is Klass::_access_flags.  Return its (constant) value.
    // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
    assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
    return TypeInt::make(klass->access_flags());
  }
  if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)) {
    // The field is Klass::_layout_helper.  Return its constant value if known.
    assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
    return TypeInt::make(klass->layout_helper());
  }

  // No match.
  return NULL;
}

//------------------------------Value-----------------------------------------
const Type *LoadNode::Value( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP
  Node* mem = in(MemNode::Memory);
  const Type *t1 = phase->type(mem);
  if (t1 == Type::TOP)  return Type::TOP;
  Node* adr = in(MemNode::Address);
  const TypePtr* tp = phase->type(adr)->isa_ptr();
  if (tp == NULL || tp->empty())  return Type::TOP;
  int off = tp->offset();
  assert(off != Type::OffsetTop, "case covered by TypePtr::empty");

  // Try to guess loaded type from pointer type
  if (tp->base() == Type::AryPtr) {
    const Type *t = tp->is_aryptr()->elem();
    // Don't do this for integer types. There is only potential profit if
    // the element type t is lower than _type; that is, for int types, if _type is
    // more restrictive than t.  This only happens here if one is short and the other
    // char (both 16 bits), and in those cases we've made an intentional decision
    // to use one kind of load over the other. See AndINode::Ideal and 4965907.
    // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
    //
    // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
    // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
    // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
    // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
    // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
    // In fact, that could have been the original type of p1, and p1 could have
    // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
    // expression (LShiftL quux 3) independently optimized to the constant 8.
    if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
        && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
      // t might actually be lower than _type, if _type is a unique
      // concrete subclass of abstract class t.
      // Make sure the reference is not into the header, by comparing
      // the offset against the offset of the start of the array's data.
      // Different array types begin at slightly different offsets (12 vs. 16).
      // We choose T_BYTE as an example base type that is least restrictive
      // as to alignment, which will therefore produce the smallest
      // possible base offset.
      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
        const Type* jt = t->join(_type);
        // In any case, do not allow the join, per se, to empty out the type.
        if (jt->empty() && !t->empty()) {
          // This can happen if an interface-typed array narrows to a class type.
          jt = _type;
        }

        if (EliminateAutoBox) {
          // The pointers in the autobox arrays are always non-null
          Node* base = in(Address)->in(AddPNode::Base);
          if (base != NULL) {
            Compile::AliasType* atp = phase->C->alias_type(base->adr_type());
            if (is_autobox_cache(atp)) {
              return jt->join(TypePtr::NOTNULL)->is_ptr();
            }
          }
        }
        return jt;
      }
    }
  } else if (tp->base() == Type::InstPtr) {
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects
            tp->is_oopptr()->klass()->is_java_lang_Object() ||
            // unsafe field access may not have a constant offset
            phase->C->has_unsafe_access(),
            "Field accesses must be precise" );
    // For oop loads, we expect the _type to be precise
  } else if (tp->base() == Type::KlassPtr) {
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects
            tp->is_klassptr()->klass()->is_java_lang_Object() ||
            // also allow array-loading from the primary supertype
            // array during subtype checks
            Opcode() == Op_LoadKlass,
            "Field accesses must be precise" );
    // For klass/static loads, we expect the _type to be precise
  }

  const TypeKlassPtr *tkls = tp->isa_klassptr();
  if (tkls != NULL && !StressReflectiveCode) {
    ciKlass* klass = tkls->klass();
    if (klass->is_loaded() && tkls->klass_is_exact()) {
      // We are loading a field from a Klass metaobject whose identity
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc)) {
        // The field is Klass::_super_check_offset.  Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
      // Compute index into primary_supers array
      juint depth = (tkls->offset() - (Klass::primary_supers_offset_in_bytes() + (int)sizeof(oopDesc))) / sizeof(klassOop);
      // Check for overflowing; use unsigned compare to handle the negative case.
      if( depth < ciKlass::primary_super_limit() ) {
        // The field is an element of Klass::_primary_supers.  Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass *ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
      }
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != NULL)  return aift;
      if (tkls->offset() == in_bytes(arrayKlass::component_mirror_offset()) + (int)sizeof(oopDesc)
          && klass->is_array_klass()) {
        // The field is arrayKlass::_component_mirror.  Return its (constant) value.
        // (Folds up aClassConstant.getComponentType, common in Arrays.copyOf.)
        assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror");
        return TypeInstPtr::make(klass->as_array_klass()->component_mirror());
      }
      if (tkls->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc)) {
        // The field is Klass::_java_mirror.  Return its (constant) value.
        // (Folds up the 2nd indirection in anObjConstant.getClass().)
        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }

    // We can still check if we are loading from the primary_supers array at a
    // shallow enough depth.  Even though the klass is not exact, entries less
    // than or equal to its super depth are correct.
    if (klass->is_loaded() ) {
      ciType *inner = klass->klass();
      while( inner->is_obj_array_klass() )
        inner = inner->as_obj_array_klass()->base_element_type();
      if( inner->is_instance_klass() &&
          !inner->as_instance_klass()->flags().is_interface() ) {
        // Compute index into primary_supers array
        juint depth = (tkls->offset() - (Klass::primary_supers_offset_in_bytes() + (int)sizeof(oopDesc))) / sizeof(klassOop);
        // Check for overflowing; use unsigned compare to handle the negative case.
        if( depth < ciKlass::primary_super_limit() &&
            depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
          // The field is an element of Klass::_primary_supers.  Return its (constant) value.
          // (Folds up type checking code.)
          assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
          ciKlass *ss = klass->super_of_depth(depth);
          return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
        }
      }
    }

    // If the type is enough to determine that the thing is not an array,
    // we can give the layout_helper a positive interval type.
    // This will help short-circuit some reflective code.
    if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)
        && !klass->is_array_klass()      // not directly typed as an array
        && !klass->is_interface()        // specifically not Serializable & Cloneable
        && !klass->is_java_lang_Object() // not the supertype of all T[]
        ) {
      // Note:  When interfaces are reliable, we can narrow the interface
      // test to (klass != Serializable && klass != Cloneable).
      assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
      jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
      // The key property of this type is that it folds up tests
      // for array-ness, since it proves that the layout_helper is positive.
      // Thus, a generic value like the basic object layout helper works fine.
      return TypeInt::make(min_size, max_jint, Type::WidenMin);
    }
  }

  // If we are loading from a freshly-allocated object, produce a zero,
  // if the load is provably beyond the header of the object.
  // (Also allow a variable load from a fresh array to produce zero.)
  if (ReduceFieldZeroing) {
    Node* value = can_see_stored_value(mem,phase);
    if (value != NULL && value->is_Con())
      return value->bottom_type();
  }

  const TypeOopPtr *tinst = tp->isa_oopptr();
  if (tinst != NULL && tinst->is_known_instance_field()) {
    // If we have an instance type and our memory input is the
    // program's initial memory state, there is no matching store,
    // so just return a zero of the appropriate type
    Node *mem = in(MemNode::Memory);
    if (mem->is_Parm() && mem->in(0)->is_Start()) {
      assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
      return Type::get_zero_type(_type->basic_type());
    }
  }
  return _type;
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not?  Match only the address.
uint LoadNode::match_edge(uint idx) const {
  return idx == MemNode::Address;
}

//--------------------------LoadBNode::Ideal--------------------------------------
//
//  If the previous store is to the same address as this load,
//  and the value stored was larger than a byte, replace this load
//  with the value stored truncated to a byte.  If no truncation is
//  needed, the replacement is done in LoadNode::Identity().
//
Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if( value && !phase->type(value)->higher_equal( _type ) ) {
    Node *result = phase->transform( new (phase->C, 3) LShiftINode(value, phase->intcon(24)) );
    return new (phase->C, 3) RShiftINode(result, phase->intcon(24));
  }
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}
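
// Illustrative sketch (not part of the original source): the shift pair above
// sign-extends the low byte of the stored value.  E.g. if the store wrote
// 0x1234, then (0x1234 << 24) >> 24 == 0x34, and a stored 0x1280 becomes
// (0x1280 << 24) >> 24 == 0xFFFFFF80 == -128, exactly what a byte load
// followed by sign extension would produce.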

//--------------------------LoadCNode::Ideal--------------------------------------
//
//  If the previous store is to the same address as this load,
//  and the value stored was larger than a char, replace this load
//  with the value stored truncated to a char.  If no truncation is
//  needed, the replacement is done in LoadNode::Identity().
//
Node *LoadCNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if( value && !phase->type(value)->higher_equal( _type ) )
    return new (phase->C, 3) AndINode(value,phase->intcon(0xFFFF));
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}
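
// Illustrative sketch (not part of the original source): chars are unsigned,
// so a mask is enough here.  If the store wrote -1 (0xFFFFFFFF), the load
// becomes AndI(-1, 0xFFFF) == 0xFFFF == 65535, matching a zero-extending
// char load.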

//--------------------------LoadSNode::Ideal--------------------------------------
//
//  If the previous store is to the same address as this load,
//  and the value stored was larger than a short, replace this load
//  with the value stored truncated to a short.  If no truncation is
//  needed, the replacement is done in LoadNode::Identity().
//
Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if( value && !phase->type(value)->higher_equal( _type ) ) {
    Node *result = phase->transform( new (phase->C, 3) LShiftINode(value, phase->intcon(16)) );
    return new (phase->C, 3) RShiftINode(result, phase->intcon(16));
  }
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method:
Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
  Compile* C = gvn.C;
  Node *ctl = NULL;
  // sanity check the alias category against the created node type
  const TypeOopPtr *adr_type = adr->bottom_type()->isa_oopptr();
  assert(adr_type != NULL, "expecting TypeOopPtr");
#ifdef _LP64
  if (adr_type->is_ptr_to_narrowoop()) {
    Node* load_klass = gvn.transform(new (C, 3) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowoop()));
    return new (C, 2) DecodeNNode(load_klass, load_klass->bottom_type()->make_ptr());
  }
#endif
  assert(!adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
  return new (C, 3) LoadKlassNode(ctl, mem, adr, at, tk);
}

//------------------------------Value------------------------------------------
const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
  return klass_value_common(phase);
}

const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if (t1 == Type::TOP)  return Type::TOP;
  Node *adr = in(MemNode::Address);
  const Type *t2 = phase->type( adr );
  if (t2 == Type::TOP)  return Type::TOP;
  const TypePtr *tp = t2->is_ptr();
  if (TypePtr::above_centerline(tp->ptr()) ||
      tp->ptr() == TypePtr::Null)  return Type::TOP;

  // Return a more precise klass, if possible
  const TypeInstPtr *tinst = tp->isa_instptr();
  if (tinst != NULL) {
    ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
    int offset = tinst->offset();
    if (ik == phase->C->env()->Class_klass()
        && (offset == java_lang_Class::klass_offset_in_bytes() ||
            offset == java_lang_Class::array_klass_offset_in_bytes())) {
      // We are loading a special hidden field from a Class mirror object,
      // the field which points to the VM's Klass metaobject.
      ciType* t = tinst->java_mirror_type();
      // java_mirror_type returns non-null for compile-time Class constants.
      if (t != NULL) {
        // constant oop => constant klass
        if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
          return TypeKlassPtr::make(ciArrayKlass::make(t));
        }
        if (!t->is_klass()) {
          // a primitive Class (e.g., int.class) has NULL for a klass field
          return TypePtr::NULL_PTR;
        }
        // (Folds up the 1st indirection in aClassConstant.getModifiers().)
        return TypeKlassPtr::make(t->as_klass());
      }
      // non-constant mirror, so we can't tell what's going on
    }
    if( !ik->is_loaded() )
      return _type;             // Bail out if not loaded
    if (offset == oopDesc::klass_offset_in_bytes()) {
      if (tinst->klass_is_exact()) {
        return TypeKlassPtr::make(ik);
      }
      // See if we can become precise: no subklasses and no interface
      // (Note:  We need to support verified interfaces.)
      if (!ik->is_interface() && !ik->has_subklass()) {
        //assert(!UseExactTypes, "this code should be useless with exact types");
        // Add a dependence; if any subclass added we need to recompile
        if (!ik->is_final()) {
          // %%% should use stronger assert_unique_concrete_subtype instead
          phase->C->dependencies()->assert_leaf_type(ik);
        }
        // Return precise klass
        return TypeKlassPtr::make(ik);
      }

      // Return root of possible klass
      return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/);
    }
  }

  // Check for loading klass from an array
  const TypeAryPtr *tary = tp->isa_aryptr();
  if( tary != NULL ) {
    ciKlass *tary_klass = tary->klass();
    if (tary_klass != NULL   // can be NULL when at BOTTOM or TOP
        && tary->offset() == oopDesc::klass_offset_in_bytes()) {
      if (tary->klass_is_exact()) {
        return TypeKlassPtr::make(tary_klass);
      }
      ciArrayKlass *ak = tary->klass()->as_array_klass();
      // If the klass is an object array, we defer the question to the
      // array component klass.
      if( ak->is_obj_array_klass() ) {
        assert( ak->is_loaded(), "" );
        ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
        if( base_k->is_loaded() && base_k->is_instance_klass() ) {
          ciInstanceKlass* ik = base_k->as_instance_klass();
          // See if we can become precise: no subklasses and no interface
          if (!ik->is_interface() && !ik->has_subklass()) {
            //assert(!UseExactTypes, "this code should be useless with exact types");
            // Add a dependence; if any subclass added we need to recompile
            if (!ik->is_final()) {
              phase->C->dependencies()->assert_leaf_type(ik);
            }
            // Return precise array klass
            return TypeKlassPtr::make(ak);
          }
        }
        return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
      } else {                  // Found a type-array?
        //assert(!UseExactTypes, "this code should be useless with exact types");
        assert( ak->is_type_array_klass(), "" );
        return TypeKlassPtr::make(ak); // These are always precise
      }
    }
  }

  // Check for loading klass from an array klass
  const TypeKlassPtr *tkls = tp->isa_klassptr();
  if (tkls != NULL && !StressReflectiveCode) {
    ciKlass* klass = tkls->klass();
    if( !klass->is_loaded() )
      return _type;             // Bail out if not loaded
    if( klass->is_obj_array_klass() &&
        (uint)tkls->offset() == objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)) {
      ciKlass* elem = klass->as_obj_array_klass()->element_klass();
      // // Always returning precise element type is incorrect,
      // // e.g., element type could be object and array may contain strings
      // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);

      // The array's TypeKlassPtr was declared 'precise' or 'not precise'
      // according to the element type's subclassing.
      return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
    }
    if( klass->is_instance_klass() && tkls->klass_is_exact() &&
        (uint)tkls->offset() == Klass::super_offset_in_bytes() + sizeof(oopDesc)) {
      ciKlass* sup = klass->as_instance_klass()->super();
      // The field is Klass::_super.  Return its (constant) value.
      // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
      return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
    }
  }

  // Bailout case
  return LoadNode::Value(phase);
}

//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
// Also feed through the klass in Allocate(...klass...)._klass.
Node* LoadKlassNode::Identity( PhaseTransform *phase ) {
  return klass_identity_common(phase);
}

Node* LoadNode::klass_identity_common(PhaseTransform *phase ) {
  Node* x = LoadNode::Identity(phase);
  if (x != this)  return x;

  // Take apart the address into an oop and an offset.
  // Return 'this' if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL)     return this;
  const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
  if (toop == NULL)     return this;

  // We can fetch the klass directly through an AllocateNode.
  // This works even if the klass is not constant (clone or newArray).
  if (offset == oopDesc::klass_offset_in_bytes()) {
    Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
    if (allocated_klass != NULL) {
      return allocated_klass;
    }
  }

  // Simplify k.java_mirror.as_klass to plain k, where k is a klassOop.
  // Simplify ak.component_mirror.array_klass to plain ak, ak an arrayKlass.
  // See inline_native_Class_query for occurrences of these patterns.
  // Java Example:  x.getClass().isAssignableFrom(y)
  // Java Example:  Array.newInstance(x.getClass().getComponentType(), n)
  //
  // This improves reflective code, often making the Class
  // mirror go completely dead.  (Current exception:  Class
  // mirrors may appear in debug info, but we could clean them out by
  // introducing a new debug info operator for klassOop.java_mirror).
  if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
      && (offset == java_lang_Class::klass_offset_in_bytes() ||
          offset == java_lang_Class::array_klass_offset_in_bytes())) {
    // We are loading a special hidden field from a Class mirror,
    // the field which points to its Klass or arrayKlass metaobject.
    if (base->is_Load()) {
      Node* adr2 = base->in(MemNode::Address);
      const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
      if (tkls != NULL && !tkls->empty()
          && (tkls->klass()->is_instance_klass() ||
              tkls->klass()->is_array_klass())
          && adr2->is_AddP()
          ) {
        int mirror_field = Klass::java_mirror_offset_in_bytes();
        if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
          mirror_field = in_bytes(arrayKlass::component_mirror_offset());
        }
        if (tkls->offset() == mirror_field + (int)sizeof(oopDesc)) {
          return adr2->in(AddPNode::Base);
        }
      }
    }
  }

  return this;
}


//------------------------------Value------------------------------------------
const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const {
  const Type *t = klass_value_common(phase);
  if (t == Type::TOP)
    return t;

  return t->make_narrowoop();
}

//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
// Also feed through the klass in Allocate(...klass...)._klass.
Node* LoadNKlassNode::Identity( PhaseTransform *phase ) {
  Node *x = klass_identity_common(phase);

  const Type *t = phase->type( x );
  if( t == Type::TOP ) return x;
  if( t->isa_narrowoop()) return x;

  return phase->transform(new (phase->C, 2) EncodePNode(x, t->make_narrowoop()));
}

//------------------------------Value-----------------------------------------
const Type *LoadRangeNode::Value( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if( t1 == Type::TOP ) return Type::TOP;
  Node *adr = in(MemNode::Address);
  const Type *t2 = phase->type( adr );
  if( t2 == Type::TOP ) return Type::TOP;
  const TypePtr *tp = t2->is_ptr();
  if (TypePtr::above_centerline(tp->ptr()))  return Type::TOP;
  const TypeAryPtr *tap = tp->isa_aryptr();
  if( !tap ) return _type;
  return tap->size();
}

//------------------------------Identity---------------------------------------
// Feed through the length in AllocateArray(...length...)._length.
Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
  Node* x = LoadINode::Identity(phase);
  if (x != this)  return x;

  // Take apart the address into an oop and an offset.
  // Return 'this' if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL)     return this;
  const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
  if (tary == NULL)     return this;

  // We can fetch the length directly through an AllocateArrayNode.
  // This works even if the length is not constant (clone or newArray).
  if (offset == arrayOopDesc::length_offset_in_bytes()) {
    Node* allocated_length = AllocateArrayNode::Ideal_length(base, phase);
    if (allocated_length != NULL) {
      return allocated_length;
    }
  }

  return this;
}
//=============================================================================
//---------------------------StoreNode::make-----------------------------------
// Polymorphic factory method:
StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
  Compile* C = gvn.C;

  switch (bt) {
  case T_BOOLEAN:
  case T_BYTE:    return new (C, 4) StoreBNode(ctl, mem, adr, adr_type, val);
  case T_INT:     return new (C, 4) StoreINode(ctl, mem, adr, adr_type, val);
  case T_CHAR:
  case T_SHORT:   return new (C, 4) StoreCNode(ctl, mem, adr, adr_type, val);
  case T_LONG:    return new (C, 4) StoreLNode(ctl, mem, adr, adr_type, val);
  case T_FLOAT:   return new (C, 4) StoreFNode(ctl, mem, adr, adr_type, val);
  case T_DOUBLE:  return new (C, 4) StoreDNode(ctl, mem, adr, adr_type, val);
  case T_ADDRESS:
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop() ||
        (UseCompressedOops && val->bottom_type()->isa_klassptr() &&
         adr->bottom_type()->isa_rawptr())) {
      val = gvn.transform(new (C, 2) EncodePNode(val, val->bottom_type()->make_narrowoop()));
      return new (C, 4) StoreNNode(ctl, mem, adr, adr_type, val);
    } else
#endif
    {
      return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val);
    }
  }
  ShouldNotReachHere();
  return (StoreNode*)NULL;
}
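
// Illustrative sketch (not part of the original source): a caller holding a
// GVN phase plus control, memory, address and value nodes would create, e.g.,
// an int store as
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT);
// which the switch above turns into a StoreINode; T_OBJECT on an _LP64 build
// with a narrow-oop address instead yields an EncodeP feeding a StoreNNode.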

StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) {
  bool require_atomic = true;
  return new (C, 4) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic);
}


//--------------------------bottom_type----------------------------------------
const Type *StoreNode::bottom_type() const {
  return Type::MEMORY;
}

//------------------------------hash-------------------------------------------
uint StoreNode::hash() const {
  // unroll addition of interesting fields
  //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);

  // Since they are not commoned, do not hash them:
  return NO_HASH;
}

//------------------------------Ideal------------------------------------------
// Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
// When a store immediately follows a relevant allocation/initialization,
// try to capture it into the initialization, or hoist it above.
Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p)  return (p == NodeSentinel) ? NULL : p;

  Node* mem     = in(MemNode::Memory);
  Node* address = in(MemNode::Address);

  // Back-to-back stores to same address?  Fold em up.
  // Generally unsafe if I have intervening uses...
  if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address)) {
    // Looking at a dead closed cycle of memory?
    assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");

    assert(Opcode() == mem->Opcode() ||
           phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw,
           "no mismatched stores, except on raw memory");

    if (mem->outcnt() == 1 &&   // check for intervening uses
        mem->as_Store()->memory_size() <= this->memory_size()) {
      // If anybody other than 'this' uses 'mem', we cannot fold 'mem' away.
      // For example, 'mem' might be the final state at a conditional return.
      // Or, 'mem' might be used by some node which is live at the same time
      // 'this' is live, which might be unschedulable.  So, require exactly
      // ONE user, the 'this' store, until such time as we clone 'mem' for
      // each of 'mem's uses (thus making the exactly-1-user-rule hold true).
      if (can_reshape) {  // (%%% is this an anachronism?)
        set_req_X(MemNode::Memory, mem->in(MemNode::Memory),
                  phase->is_IterGVN());
      } else {
        // It's OK to do this in the parser, since DU info is always accurate,
        // and the parser always refers to nodes via SafePointNode maps.
        set_req(MemNode::Memory, mem->in(MemNode::Memory));
      }
      return this;
    }
  }

  // Capture an unaliased, unconditional, simple store into an initializer.
  // Or, if it is independent of the allocation, hoist it above the allocation.
  if (ReduceFieldZeroing && /*can_reshape &&*/
      mem->is_Proj() && mem->in(0)->is_Initialize()) {
    InitializeNode* init = mem->in(0)->as_Initialize();
    intptr_t offset = init->can_capture_store(this, phase);
    if (offset > 0) {
      Node* moved = init->capture_store(this, offset, phase);
      // If the InitializeNode captured me, it made a raw copy of me,
      // and I need to disappear.
      if (moved != NULL) {
        // %%% hack to ensure that Ideal returns a new node:
        mem = MergeMemNode::make(phase->C, mem);
        return mem;             // fold me away
      }
    }
  }

  return NULL;                  // No further progress
}
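
// Illustrative sketch (not part of the original source): the back-to-back case
// above rewires
//   this = StoreI(mem: StoreI(m0, p, 1), p, 2)
// so that this->in(Memory) becomes m0, leaving StoreI(m0, p, 2); the earlier
// store to p had no other users (outcnt == 1 was checked) and is overwritten,
// so it becomes dead.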

//------------------------------Value-----------------------------------------
const Type *StoreNode::Value( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if( t1 == Type::TOP ) return Type::TOP;
  const Type *t2 = phase->type( in(MemNode::Address) );
  if( t2 == Type::TOP ) return Type::TOP;
  const Type *t3 = phase->type( in(MemNode::ValueIn) );
  if( t3 == Type::TOP ) return Type::TOP;
  return Type::MEMORY;
}

//------------------------------Identity---------------------------------------
// Remove redundant stores:
//   Store(m, p, Load(m, p)) changes to m.
//   Store(, p, x) -> Store(m, p, x) changes to Store(m, p, x).
Node *StoreNode::Identity( PhaseTransform *phase ) {
  Node* mem = in(MemNode::Memory);
  Node* adr = in(MemNode::Address);
  Node* val = in(MemNode::ValueIn);

  // Load then Store?  Then the Store is useless
  if (val->is_Load() &&
      phase->eqv_uncast( val->in(MemNode::Address), adr ) &&
      phase->eqv_uncast( val->in(MemNode::Memory ), mem ) &&
      val->as_Load()->store_Opcode() == Opcode()) {
    return mem;
  }

  // Two stores in a row of the same value?
  if (mem->is_Store() &&
      phase->eqv_uncast( mem->in(MemNode::Address), adr ) &&
      phase->eqv_uncast( mem->in(MemNode::ValueIn), val ) &&
      mem->Opcode() == Opcode()) {
    return mem;
  }

  // Store of zero anywhere into a freshly-allocated object?
  // Then the store is useless.
  // (It must already have been captured by the InitializeNode.)
  if (ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
    // a newly allocated object is already all-zeroes everywhere
    if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
      return mem;
    }

    // the store may also apply to zero-bits in an earlier object
    Node* prev_mem = find_previous_store(phase);
    // Steps (a), (b):  Walk past independent stores to find an exact match.
    if (prev_mem != NULL) {
      Node* prev_val = can_see_stored_value(prev_mem, phase);
      if (prev_val != NULL && phase->eqv(prev_val, val)) {
        // prev_val and val might differ by a cast; it would be good
        // to keep the more informative of the two.
        return mem;
      }
    }
  }

  return this;
}
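
// Informally, the three cases above correspond to source shapes like:
//   x.f = x.f;            // load then store back to the same slot: the store folds to m
//   x.f = v; x.f = v;     // two identical stores in a row: the later one folds away
//   a[i] = 0;             // with 'a' freshly allocated and ReduceFieldZeroing on, the zero
//                         // is already implied by the allocation's zeroing
// The first two cases require the address and value edges to be provably
// equal (modulo casts, via eqv_uncast) before the store can be removed.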

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not?  Match only memory & value
uint StoreNode::match_edge(uint idx) const {
  return idx == MemNode::Address || idx == MemNode::ValueIn;
}

//------------------------------cmp--------------------------------------------
// Do not common stores up together.  They generally have to be split
// back up anyways, so do not bother.
uint StoreNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------Ideal_masked_input-----------------------------
// Check for a useless mask before a partial-word store
// (StoreB ... (AndI valIn conIa) )
// If (conIa & mask == mask) this simplifies to
// (StoreB ... (valIn) )
Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) {
  Node *val = in(MemNode::ValueIn);
  if( val->Opcode() == Op_AndI ) {
    const TypeInt *t = phase->type( val->in(2) )->isa_int();
    if( t && t->is_con() && (t->get_con() & mask) == mask ) {
      set_req(MemNode::ValueIn, val->in(1));
      return this;
    }
  }
  return NULL;
}
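
// Informal example of the transformation above: a source-level store such as
//   b[i] = (byte)(v & 0xFF);
// parses to (StoreB ... (AndI v 0xFF)).  With mask == 0xFF the test
// (con & mask) == mask succeeds, so the AndI is bypassed and the store
// becomes (StoreB ... v); the byte store itself already discards the bits
// the mask would have cleared.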


//------------------------------Ideal_sign_extended_input----------------------
// Check for useless sign-extension before a partial-word store
// (StoreB ... (RShiftI _ (LShiftI _ valIn conIL ) conIR) )
// If (conIL == conIR && conIR <= num_bits)  this simplifies to
// (StoreB ... (valIn) )
Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) {
  Node *val = in(MemNode::ValueIn);
  if( val->Opcode() == Op_RShiftI ) {
    const TypeInt *t = phase->type( val->in(2) )->isa_int();
    if( t && t->is_con() && (t->get_con() <= num_bits) ) {
      Node *shl = val->in(1);
      if( shl->Opcode() == Op_LShiftI ) {
        const TypeInt *t2 = phase->type( shl->in(2) )->isa_int();
        if( t2 && t2->is_con() && (t2->get_con() == t->get_con()) ) {
          set_req(MemNode::ValueIn, shl->in(1));
          return this;
        }
      }
    }
  }
  return NULL;
}
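
// Informal example of the transformation above: a byte store of a value that
// was sign-extended through int,
//   b[i] = (byte)((v << 24) >> 24);
// parses to (StoreB ... (RShiftI (LShiftI v 24) 24)).  StoreB calls in with
// num_bits == 24; the two shift counts match and are <= 24, so both shifts
// are bypassed and the store becomes (StoreB ... v): the low 8 bits kept by
// StoreB are unaffected by the sign extension.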

//------------------------------value_never_loaded-----------------------------------
// Determine whether there are any possible loads of the value stored.
// For simplicity, we actually check if there are any loads from the
// address stored to, not just for loads of the value stored by this node.
//
bool StoreNode::value_never_loaded( PhaseTransform *phase) const {
  Node *adr = in(Address);
  const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr();
  if (adr_oop == NULL)
    return false;
  if (!adr_oop->is_known_instance_field())
    return false; // if not a distinct instance, there may be aliases of the address
  for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
    Node *use = adr->fast_out(i);
    int opc = use->Opcode();
    if (use->is_Load() || use->is_LoadStore()) {
      return false;
    }
  }
  return true;
}

//=============================================================================
//------------------------------Ideal------------------------------------------
// If the store is from an AND mask that leaves the low bits untouched, then
// we can skip the AND operation.  If the store is from a sign-extension
// (a left shift, then right shift) we can skip both.
Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){
  Node *progress = StoreNode::Ideal_masked_input(phase, 0xFF);
  if( progress != NULL ) return progress;

  progress = StoreNode::Ideal_sign_extended_input(phase, 24);
  if( progress != NULL ) return progress;

  // Finally check the default case
  return StoreNode::Ideal(phase, can_reshape);
}

//=============================================================================
//------------------------------Ideal------------------------------------------
// If the store is from an AND mask that leaves the low bits untouched, then
// we can skip the AND operation
Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){
  Node *progress = StoreNode::Ideal_masked_input(phase, 0xFFFF);
  if( progress != NULL ) return progress;

  progress = StoreNode::Ideal_sign_extended_input(phase, 16);
  if( progress != NULL ) return progress;

  // Finally check the default case
  return StoreNode::Ideal(phase, can_reshape);
}

//=============================================================================
//------------------------------Identity---------------------------------------
Node *StoreCMNode::Identity( PhaseTransform *phase ) {
  // No need to card mark when storing a null ptr
  Node* my_store = in(MemNode::OopStore);
  if (my_store->is_Store()) {
    const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
    if( t1 == TypePtr::NULL_PTR ) {
      return in(MemNode::Memory);
    }
  }
  return this;
}
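
// Rationale, informally: the card mark exists to tell a generational
// collector that the covered oop store may have created a reference from an
// older region into a younger one.  A store of NULL cannot create any such
// reference, so the card mark is unnecessary and the node collapses to its
// memory input.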

//------------------------------Value-----------------------------------------
const Type *StoreCMNode::Value( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP
  const Type *t = phase->type( in(MemNode::Memory) );
  if( t == Type::TOP ) return Type::TOP;
  t = phase->type( in(MemNode::Address) );
  if( t == Type::TOP ) return Type::TOP;
  t = phase->type( in(MemNode::ValueIn) );
  if( t == Type::TOP ) return Type::TOP;
  // If extra input is TOP ==> the result is TOP
  t = phase->type( in(MemNode::OopStore) );
  if( t == Type::TOP ) return Type::TOP;

  return StoreNode::Value( phase );
}


//=============================================================================
//----------------------------------SCMemProjNode------------------------------
const Type * SCMemProjNode::Value( PhaseTransform *phase ) const
{
  return bottom_type();
}

//=============================================================================
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : Node(5) {
  init_req(MemNode::Control, c  );
  init_req(MemNode::Memory , mem);
  init_req(MemNode::Address, adr);
  init_req(MemNode::ValueIn, val);
  init_req( ExpectedIn, ex );
  init_class_id(Class_LoadStore);

}

//=============================================================================
//-------------------------------adr_type--------------------------------------
// Do we Match on this edge index or not?  Do not match memory
const TypePtr* ClearArrayNode::adr_type() const {
  Node *adr = in(3);
  return MemNode::calculate_adr_type(adr->bottom_type());
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not?  Do not match memory
uint ClearArrayNode::match_edge(uint idx) const {
  return idx > 1;
}

//------------------------------Identity---------------------------------------
// Clearing a zero length array does nothing
Node *ClearArrayNode::Identity( PhaseTransform *phase ) {
  return phase->type(in(2))->higher_equal(TypeX::ZERO) ? in(1) : this;
}

//------------------------------Idealize---------------------------------------
// Clearing a short array is faster with stores
Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
  const int unit = BytesPerLong;
  const TypeX* t = phase->type(in(2))->isa_intptr_t();
  if (!t)  return NULL;
  if (!t->is_con())  return NULL;
  intptr_t raw_count = t->get_con();
  intptr_t size = raw_count;
  if (!Matcher::init_array_count_is_in_bytes) size *= unit;
  // Clearing nothing uses the Identity call.
  // Negative clears are possible on dead ClearArrays
  // (see jck test stmt114.stmt11402.val).
  if (size <= 0 || size % unit != 0) return NULL;
  intptr_t count = size / unit;
  // Length too long; use fast hardware clear
  if (size > Matcher::init_array_short_size) return NULL;
  Node *mem = in(1);
  if( phase->type(mem)==Type::TOP ) return NULL;
  Node *adr = in(3);
  const Type* at = phase->type(adr);
  if( at==Type::TOP ) return NULL;
  const TypePtr* atp = at->isa_ptr();
  // adjust atp to be the correct array element address type
  if (atp == NULL)  atp = TypePtr::BOTTOM;
  else              atp = atp->add_offset(Type::OffsetBot);
  // Get base for derived pointer purposes
  if( adr->Opcode() != Op_AddP ) Unimplemented();
  Node *base = adr->in(1);

  Node *zero = phase->makecon(TypeLong::ZERO);
  Node *off  = phase->MakeConX(BytesPerLong);
  mem = new (phase->C, 4) StoreLNode(in(0),mem,adr,atp,zero);
  count--;
  while( count-- ) {
    mem = phase->transform(mem);
    adr = phase->transform(new (phase->C, 4) AddPNode(base,adr,off));
    mem = new (phase->C, 4) StoreLNode(in(0),mem,adr,atp,zero);
  }
  return mem;
}
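
// Informal example: with an 8-byte clearing unit and
// Matcher::init_array_short_size of at least 24, a ClearArray covering
// 24 bytes (count == 3) is expanded here into a chain of three
// StoreL-of-zero nodes at consecutive 8-byte offsets, which is cheaper than
// the bulk-clear path for such a small region.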

//----------------------------clear_memory-------------------------------------
// Generate code to initialize object storage to zero.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   intptr_t start_offset,
                                   Node* end_offset,
                                   PhaseGVN* phase) {
  Compile* C = phase->C;
  intptr_t offset = start_offset;

  int unit = BytesPerLong;
  if ((offset % unit) != 0) {
    Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
    mem = phase->transform(mem);
    offset += BytesPerInt;
  }
  assert((offset % unit) == 0, "");

  // Initialize the remaining stuff, if any, with a ClearArray.
  return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
}
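
// Informal example: if start_offset is 12 (not 8-byte aligned), the routine
// above first emits a single 32-bit zero store at offset 12 to reach an
// 8-byte boundary, then hands the remaining [16, end_offset) range to the
// Node*-offset overload below, which zeroes it in double-word units via a
// ClearArray node.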

Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   Node* start_offset,
                                   Node* end_offset,
                                   PhaseGVN* phase) {
  if (start_offset == end_offset) {
    // nothing to do
    return mem;
  }

  Compile* C = phase->C;
  int unit = BytesPerLong;
  Node* zbase = start_offset;
  Node* zend  = end_offset;

  // Scale to the unit required by the CPU:
  if (!Matcher::init_array_count_is_in_bytes) {
    Node* shift = phase->intcon(exact_log2(unit));
    zbase = phase->transform( new(C,3) URShiftXNode(zbase, shift) );
    zend  = phase->transform( new(C,3) URShiftXNode(zend,  shift) );
  }

  Node* zsize = phase->transform( new(C,3) SubXNode(zend, zbase) );
  Node* zinit = phase->zerocon((unit == BytesPerLong) ? T_LONG : T_INT);

  // Bulk clear double-words
  Node* adr = phase->transform( new(C,4) AddPNode(dest, dest, start_offset) );
  mem = new (C, 4) ClearArrayNode(ctl, mem, zsize, adr);
  return phase->transform(mem);
}

Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   intptr_t start_offset,
                                   intptr_t end_offset,
                                   PhaseGVN* phase) {
  if (start_offset == end_offset) {
    // nothing to do
    return mem;
  }

  Compile* C = phase->C;
  assert((end_offset % BytesPerInt) == 0, "odd end offset");
  intptr_t done_offset = end_offset;
  if ((done_offset % BytesPerLong) != 0) {
    done_offset -= BytesPerInt;
  }
  if (done_offset > start_offset) {
    mem = clear_memory(ctl, mem, dest,
                       start_offset, phase->MakeConX(done_offset), phase);
  }
  if (done_offset < end_offset) { // emit the final 32-bit store
    Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(done_offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
    mem = phase->transform(mem);
    done_offset += BytesPerInt;
  }
  assert(done_offset == end_offset, "");
  return mem;
}

//=============================================================================
// Do we match on this edge? No memory edges
uint StrCompNode::match_edge(uint idx) const {
  return idx == 5 || idx == 6;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *StrCompNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *AryEqNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}


//=============================================================================
MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
  : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
    _adr_type(C->get_adr_type(alias_idx))
{
  init_class_id(Class_MemBar);
  Node* top = C->top();
  init_req(TypeFunc::I_O,top);
  init_req(TypeFunc::FramePtr,top);
  init_req(TypeFunc::ReturnAdr,top);
  if (precedent != NULL)
    init_req(TypeFunc::Parms, precedent);
}

//------------------------------cmp--------------------------------------------
uint MemBarNode::hash() const { return NO_HASH; }
uint MemBarNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------make-------------------------------------------
MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
  int len = Precedent + (pn == NULL? 0: 1);
  switch (opcode) {
  case Op_MemBarAcquire:   return new(C, len) MemBarAcquireNode(C,  atp, pn);
  case Op_MemBarRelease:   return new(C, len) MemBarReleaseNode(C,  atp, pn);
  case Op_MemBarVolatile:  return new(C, len) MemBarVolatileNode(C, atp, pn);
  case Op_MemBarCPUOrder:  return new(C, len) MemBarCPUOrderNode(C, atp, pn);
  case Op_Initialize:      return new(C, len) InitializeNode(C,     atp, pn);
  default:                 ShouldNotReachHere(); return NULL;
  }
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;
  return NULL;
}

//------------------------------Value------------------------------------------
const Type *MemBarNode::Value( PhaseTransform *phase ) const {
  if( !in(0) ) return Type::TOP;
  if( phase->type(in(0)) == Type::TOP )
    return Type::TOP;
  return TypeTuple::MEMBAR;
}

//------------------------------match------------------------------------------
// Construct projections for memory.
Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::Memory:
    return new (m->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  }
  ShouldNotReachHere();
  return NULL;
}

//===========================InitializeNode====================================
// SUMMARY:
// This node acts as a memory barrier on raw memory, after some raw stores.
// The 'cooked' oop value feeds from the Initialize, not the Allocation.
// The Initialize can 'capture' suitably constrained stores as raw inits.
// It can coalesce related raw stores into larger units (called 'tiles').
// It can avoid zeroing new storage for memory units which have raw inits.
// At macro-expansion, it is marked 'complete', and does not optimize further.
//
// EXAMPLE:
// The object 'new short[2]' occupies 16 bytes in a 32-bit machine.
//   ctl = incoming control; mem* = incoming memory
// (Note:  A star * on a memory edge denotes I/O and other standard edges.)
// First allocate uninitialized memory and fill in the header:
//   alloc = (Allocate ctl mem* 16 #short[].klass ...)
//   ctl := alloc.Control; mem* := alloc.Memory*
//   rawmem = alloc.Memory; rawoop = alloc.RawAddress
// Then initialize to zero the non-header parts of the raw memory block:
//   init = (Initialize alloc.Control alloc.Memory* alloc.RawAddress)
//   ctl := init.Control; mem.SLICE(#short[*]) := init.Memory
// After the initialize node executes, the object is ready for service:
//   oop := (CheckCastPP init.Control alloc.RawAddress #short[])
// Suppose its body is immediately initialized as {1,2}:
//   store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
//   store2 = (StoreC init.Control store1      (+ oop 14) 2)
//   mem.SLICE(#short[*]) := store2
//
// DETAILS:
// An InitializeNode collects and isolates object initialization after
// an AllocateNode and before the next possible safepoint.  As a
// memory barrier (MemBarNode), it keeps critical stores from drifting
// down past any safepoint or any publication of the allocation.
// Before this barrier, a newly-allocated object may have uninitialized bits.
// After this barrier, it may be treated as a real oop, and GC is allowed.
//
// The semantics of the InitializeNode include an implicit zeroing of
// the new object from object header to the end of the object.
// (The object header and end are determined by the AllocateNode.)
//
// Certain stores may be added as direct inputs to the InitializeNode.
// These stores must update raw memory, and they must be to addresses
// derived from the raw address produced by AllocateNode, and with
// a constant offset.  They must be ordered by increasing offset.
// The first one is at in(RawStores), the last at in(req()-1).
// Unlike most memory operations, they are not linked in a chain,
// but are displayed in parallel as users of the rawmem output of
// the allocation.
//
// (See comments in InitializeNode::capture_store, which continue
// the example given above.)
//
// When the associated Allocate is macro-expanded, the InitializeNode
// may be rewritten to optimize collected stores.  A ClearArrayNode
// may also be created at that point to represent any required zeroing.
// The InitializeNode is then marked 'complete', prohibiting further
// capturing of nearby memory operations.
//
// During macro-expansion, all captured initializations which store
// constant values of 32 bits or smaller are coalesced (if advantageous)
// into larger 'tiles' of 32 or 64 bits.  This allows an object to be
// initialized in fewer memory operations.  Memory words which are
// covered by neither tiles nor non-constant stores are pre-zeroed
// by explicit stores of zero.  (The code shape happens to do all
// zeroing first, then all other stores, with both sequences occurring
// in order of ascending offsets.)
//
// Alternatively, code may be inserted between an AllocateNode and its
// InitializeNode, to perform arbitrary initialization of the new object.
// E.g., the object copying intrinsics insert complex data transfers here.
// The initialization must then be marked as 'complete' to disable the
// built-in zeroing semantics and the collection of initializing stores.
//
// While an InitializeNode is incomplete, reads from the memory state
// produced by it are optimizable if they match the control edge and
// new oop address associated with the allocation/initialization.
// They return a stored value (if the offset matches) or else zero.
// A write to the memory state, if it matches control and address,
// and if it is to a constant offset, may be 'captured' by the
// InitializeNode.  It is cloned as a raw memory operation and rewired
// inside the initialization, to the raw oop produced by the allocation.
// Operations on addresses which are provably distinct (e.g., to
// other AllocateNodes) are allowed to bypass the initialization.
//
// The effect of all this is to consolidate object initialization
// (both arrays and non-arrays, both piecewise and bulk) into a
// single location, where it can be optimized as a unit.
//
// Only stores with an offset less than TrackedInitializationLimit words
// will be considered for capture by an InitializeNode.  This puts a
// reasonable limit on the complexity of optimized initializations.

//---------------------------InitializeNode------------------------------------
InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop)
  : _is_complete(false),
    MemBarNode(C, adr_type, rawoop)
{
  init_class_id(Class_Initialize);

  assert(adr_type == Compile::AliasIdxRaw, "only valid atp");
  assert(in(RawAddress) == rawoop, "proper init");
  // Note:  allocation() can be NULL, for secondary initialization barriers
}

// Since this node is not matched, it will be processed by the
// register allocator.  Declare that there are no constraints
// on the allocation of the RawAddress edge.
const RegMask &InitializeNode::in_RegMask(uint idx) const {
  // This edge should be set to top, by the set_complete.  But be conservative.
  if (idx == InitializeNode::RawAddress)
    return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
  return RegMask::Empty;
}

Node* InitializeNode::memory(uint alias_idx) {
  Node* mem = in(Memory);
  if (mem->is_MergeMem()) {
    return mem->as_MergeMem()->memory_at(alias_idx);
  } else {
    // incoming raw memory is not split
    return mem;
  }
}

bool InitializeNode::is_non_zero() {
  if (is_complete())  return false;
  remove_extra_zeroes();
  return (req() > RawStores);
}

void InitializeNode::set_complete(PhaseGVN* phase) {
  assert(!is_complete(), "caller responsibility");
  _is_complete = true;

  // After this node is complete, it contains a bunch of
  // raw-memory initializations.  There is no need for
  // it to have anything to do with non-raw memory effects.
  // Therefore, tell all non-raw users to re-optimize themselves,
  // after skipping the memory effects of this initialization.
  PhaseIterGVN* igvn = phase->is_IterGVN();
  if (igvn)  igvn->add_users_to_worklist(this);
}

// convenience function
// return false if the init contains any stores already
bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
  InitializeNode* init = initialization();
  if (init == NULL || init->is_complete())  return false;
  init->remove_extra_zeroes();
  // for now, if this allocation has already collected any inits, bail:
  if (init->is_non_zero())  return false;
  init->set_complete(phase);
  return true;
}

void InitializeNode::remove_extra_zeroes() {
  if (req() == RawStores)  return;
  Node* zmem = zero_memory();
  uint fill = RawStores;
  for (uint i = fill; i < req(); i++) {
    Node* n = in(i);
    if (n->is_top() || n == zmem)  continue;  // skip
    if (fill < i)  set_req(fill, n);          // compact
    ++fill;
  }
  // delete any empty spaces created:
  while (fill < req()) {
    del_req(fill);
  }
}

// Helper for remembering which stores go with which offsets.
intptr_t InitializeNode::get_store_offset(Node* st, PhaseTransform* phase) {
  if (!st->is_Store())  return -1;  // can happen to dead code via subsume_node
  intptr_t offset = -1;
  Node* base = AddPNode::Ideal_base_and_offset(st->in(MemNode::Address),
                                               phase, offset);
  if (base == NULL)     return -1;  // something is dead,
  if (offset < 0)       return -1;  //        dead, dead
  return offset;
}

// Helper for proving that an initialization expression is
// "simple enough" to be folded into an object initialization.
// Attempts to prove that a store's initial value 'n' can be captured
// within the initialization without creating a vicious cycle, such as:
//     { Foo p = new Foo(); p.next = p; }
// True for constants and parameters and small combinations thereof.
bool InitializeNode::detect_init_independence(Node* n,
                                              bool st_is_pinned,
                                              int& count) {
  if (n == NULL)      return true;   // (can this really happen?)
  if (n->is_Proj())   n = n->in(0);
  if (n == this)      return false;  // found a cycle
  if (n->is_Con())    return true;
  if (n->is_Start())  return true;   // params, etc., are OK
  if (n->is_Root())   return true;   // even better

  Node* ctl = n->in(0);
  if (ctl != NULL && !ctl->is_top()) {
    if (ctl->is_Proj())  ctl = ctl->in(0);
    if (ctl == this)  return false;

    // If we already know that the enclosing memory op is pinned right after
    // the init, then any control flow that the store has picked up
    // must have preceded the init, or else be equal to the init.
    // Even after loop optimizations (which might change control edges)
    // a store is never pinned *before* the availability of its inputs.
    if (!MemNode::all_controls_dominate(n, this))
      return false;                  // failed to prove a good control

  }

  // Check data edges for possible dependencies on 'this'.
  if ((count += 1) > 20)  return false;  // complexity limit
  for (uint i = 1; i < n->req(); i++) {
    Node* m = n->in(i);
    if (m == NULL || m == n || m->is_top())  continue;
    uint first_i = n->find_edge(m);
    if (i != first_i)  continue;  // process duplicate edge just once
    if (!detect_init_independence(m, st_is_pinned, count)) {
      return false;
    }
  }

  return true;
}

// Here are all the checks a Store must pass before it can be moved into
// an initialization.  Returns zero if a check fails.
// On success, returns the (constant) offset to which the store applies,
// within the initialized memory.
intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase) {
  const int FAIL = 0;
  if (st->req() != MemNode::ValueIn + 1)
    return FAIL;                // an inscrutable StoreNode (card mark?)
  Node* ctl = st->in(MemNode::Control);
  if (!(ctl != NULL && ctl->is_Proj() && ctl->in(0) == this))
    return FAIL;                // must be unconditional after the initialization
  Node* mem = st->in(MemNode::Memory);
  if (!(mem->is_Proj() && mem->in(0) == this))
    return FAIL;                // must not be preceded by other stores
  Node* adr = st->in(MemNode::Address);
  intptr_t offset;
  AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset);
  if (alloc == NULL)
    return FAIL;                // inscrutable address
  if (alloc != allocation())
    return FAIL;                // wrong allocation!  (store needs to float up)
  Node* val = st->in(MemNode::ValueIn);
  int complexity_count = 0;
  if (!detect_init_independence(val, true, complexity_count))
    return FAIL;                // stored value must be 'simple enough'

  return offset;                // success
}
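
// Informal example: for
//   Foo p = new Foo(); p.x = 5;
// the StoreI of 5 passes every check above: its control and memory inputs
// are projections of this Initialize, its address is a constant offset from
// the allocation's raw address, and the constant 5 trivially satisfies
// detect_init_independence.  By contrast, 'p.next = p' fails (the stored
// value depends on the allocation itself), and a store performed under a
// condition after the allocation fails the control check.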

// Find the captured store in(i) which corresponds to the range
// [start..start+size) in the initialized object.
// If there is one, return its index i.  If there isn't, return the
// negative of the index where it should be inserted.
// Return 0 if the queried range overlaps an initialization boundary
// or if dead code is encountered.
// If size_in_bytes is zero, do not bother with overlap checks.
int InitializeNode::captured_store_insertion_point(intptr_t start,
                                                   int size_in_bytes,
                                                   PhaseTransform* phase) {
  const int FAIL = 0, MAX_STORE = BytesPerLong;

  if (is_complete())
    return FAIL;                // arraycopy got here first; punt

  assert(allocation() != NULL, "must be present");

  // no negatives, no header fields:
  if (start < (intptr_t) allocation()->minimum_header_size())  return FAIL;

  // after a certain size, we bail out on tracking all the stores:
  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
  if (start >= ti_limit)  return FAIL;

  for (uint i = InitializeNode::RawStores, limit = req(); ; ) {
    if (i >= limit)  return -(int)i;  // not found; here is where to put it

    Node*    st     = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0) {
      if (st != zero_memory()) {
        return FAIL;            // bail out if there is dead garbage
      }
    } else if (st_off > start) {
      // ...we are done, since stores are ordered
      if (st_off < start + size_in_bytes) {
        return FAIL;            // the next store overlaps
      }
      return -(int)i;           // not found; here is where to put it
    } else if (st_off < start) {
      if (size_in_bytes != 0 &&
          start < st_off + MAX_STORE &&
          start < st_off + st->as_Store()->memory_size()) {
        return FAIL;            // the previous store overlaps
      }
    } else {
      if (size_in_bytes != 0 &&
          st->as_Store()->memory_size() != size_in_bytes) {
        return FAIL;            // mismatched store size
      }
      return i;
    }

    ++i;
  }
}

// Look for a captured store which initializes at the offset 'start'
// with the given size.  If there is no such store, and no other
// initialization interferes, then return zero_memory (the memory
// projection of the AllocateNode).
Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes,
                                          PhaseTransform* phase) {
  assert(stores_are_sane(phase), "");
  int i = captured_store_insertion_point(start, size_in_bytes, phase);
  if (i == 0) {
    return NULL;                // something is dead
  } else if (i < 0) {
    return zero_memory();       // just primordial zero bits here
  } else {
    Node* st = in(i);           // here is the store at this position
    assert(get_store_offset(st->as_Store(), phase) == start, "sanity");
    return st;
  }
}

// Create, as a raw pointer, an address within my new object at 'offset'.
Node* InitializeNode::make_raw_address(intptr_t offset,
                                       PhaseTransform* phase) {
  Node* addr = in(RawAddress);
  if (offset != 0) {
    Compile* C = phase->C;
    addr = phase->transform( new (C, 4) AddPNode(C->top(), addr,
                                                 phase->MakeConX(offset)) );
  }
  return addr;
}

// Clone the given store, converting it into a raw store
// initializing a field or element of my new object.
// Caller is responsible for retiring the original store,
// with subsume_node or the like.
//
// From the example above InitializeNode::InitializeNode,
// here are the old stores to be captured:
//   store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
//   store2 = (StoreC init.Control store1      (+ oop 14) 2)
//
// Here is the changed code; note the extra edges on init:
//   alloc = (Allocate ...)
//   rawoop = alloc.RawAddress
//   rawstore1 = (StoreC alloc.Control alloc.Memory (+ rawoop 12) 1)
//   rawstore2 = (StoreC alloc.Control alloc.Memory (+ rawoop 14) 2)
//   init = (Initialize alloc.Control alloc.Memory rawoop
//                      rawstore1 rawstore2)
//
Node* InitializeNode::capture_store(StoreNode* st, intptr_t start,
                                    PhaseTransform* phase) {
  assert(stores_are_sane(phase), "");

  if (start < 0)  return NULL;
  assert(can_capture_store(st, phase) == start, "sanity");

  Compile* C = phase->C;
  int size_in_bytes = st->memory_size();
  int i = captured_store_insertion_point(start, size_in_bytes, phase);
  if (i == 0)  return NULL;     // bail out
  Node* prev_mem = NULL;        // raw memory for the captured store
  if (i > 0) {
    prev_mem = in(i);           // there is a pre-existing store under this one
    set_req(i, C->top());       // temporarily disconnect it
    // See StoreNode::Ideal 'st->outcnt() == 1' for the reason to disconnect.
  } else {
    i = -i;                     // no pre-existing store
    prev_mem = zero_memory();   // a slice of the newly allocated object
    if (i > InitializeNode::RawStores && in(i-1) == prev_mem)
      set_req(--i, C->top());   // reuse this edge; it has been folded away
    else
      ins_req(i, C->top());     // build a new edge
  }
  Node* new_st = st->clone();
  new_st->set_req(MemNode::Control, in(Control));
  new_st->set_req(MemNode::Memory,  prev_mem);
  new_st->set_req(MemNode::Address, make_raw_address(start, phase));
  new_st = phase->transform(new_st);

  // At this point, new_st might have swallowed a pre-existing store
  // at the same offset, or perhaps new_st might have disappeared,
  // if it redundantly stored the same value (or zero to fresh memory).

  // In any case, wire it in:
  set_req(i, new_st);

  // The caller may now kill the old guy.
  DEBUG_ONLY(Node* check_st = find_captured_store(start, size_in_bytes, phase));
  assert(check_st == new_st || check_st == NULL, "must be findable");
  assert(!is_complete(), "");
  return new_st;
}

static bool store_constant(jlong* tiles, int num_tiles,
                           intptr_t st_off, int st_size,
                           jlong con) {
  if ((st_off & (st_size-1)) != 0)
    return false;               // strange store offset (assume size==2**N)
  address addr = (address)tiles + st_off;
  assert(st_off >= 0 && addr+st_size <= (address)&tiles[num_tiles], "oob");
  switch (st_size) {
  case sizeof(jbyte):  *(jbyte*) addr = (jbyte) con; break;
  case sizeof(jchar):  *(jchar*) addr = (jchar) con; break;
  case sizeof(jint):   *(jint*)  addr = (jint)  con; break;
  case sizeof(jlong):  *(jlong*) addr = (jlong) con; break;
  default: return false;        // strange store size (detect size!=2**N here)
  }
  return true;                  // return success to caller
}
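
// Informal worked example: the tile array mirrors the object layout from
// offset 0, so in the running 'new short[2]' example a captured
// (StoreC ... (+ oop 12) 1) calls store_constant(tiles, num_tiles, 12, 2, 1)
// and writes a jchar 1 into bytes 4..5 of tiles[1]; the companion store of 2
// at offset 14 fills bytes 6..7.  Together they leave tiles[1] holding the
// combined bit pattern that Pass B below can emit as a single 32-bit store
// (the exact byte placement depends on the host's endianness).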

// Coalesce subword constants into int constants and possibly
// into long constants.  The goal, if the CPU permits,
// is to initialize the object with a small number of 64-bit tiles.
// Also, convert floating-point constants to bit patterns.
// Non-constants are not relevant to this pass.
//
// In terms of the running example on InitializeNode::InitializeNode
// and InitializeNode::capture_store, here is the transformation
// of rawstore1 and rawstore2 into rawstore12:
//   alloc = (Allocate ...)
//   rawoop = alloc.RawAddress
//   tile12 = 0x00010002
//   rawstore12 = (StoreI alloc.Control alloc.Memory (+ rawoop 12) tile12)
//   init = (Initialize alloc.Control alloc.Memory rawoop rawstore12)
//
void
InitializeNode::coalesce_subword_stores(intptr_t header_size,
                                        Node* size_in_bytes,
                                        PhaseGVN* phase) {
  Compile* C = phase->C;

  assert(stores_are_sane(phase), "");
  // Note:  After this pass, they are not completely sane,
  // since there may be some overlaps.

  int old_subword = 0, old_long = 0, new_int = 0, new_long = 0;

  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
  intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit);
  size_limit = MIN2(size_limit, ti_limit);
  size_limit = align_size_up(size_limit, BytesPerLong);
  int num_tiles = size_limit / BytesPerLong;

  // allocate space for the tile map:
  const int small_len = DEBUG_ONLY(true ? 3 :) 30; // keep stack frames small
  jlong  tiles_buf[small_len];
  Node*  nodes_buf[small_len];
  jlong  inits_buf[small_len];
  jlong* tiles = ((num_tiles <= small_len) ? &tiles_buf[0]
                  : NEW_RESOURCE_ARRAY(jlong, num_tiles));
  Node** nodes = ((num_tiles <= small_len) ? &nodes_buf[0]
                  : NEW_RESOURCE_ARRAY(Node*, num_tiles));
  jlong* inits = ((num_tiles <= small_len) ? &inits_buf[0]
                  : NEW_RESOURCE_ARRAY(jlong, num_tiles));
  // tiles: exact bitwise model of all primitive constants
  // nodes: last constant-storing node subsumed into the tiles model
  // inits: which bytes (in each tile) are touched by any initializations

  //// Pass A: Fill in the tile model with any relevant stores.

  Copy::zero_to_bytes(tiles, sizeof(tiles[0]) * num_tiles);
  Copy::zero_to_bytes(nodes, sizeof(nodes[0]) * num_tiles);
  Copy::zero_to_bytes(inits, sizeof(inits[0]) * num_tiles);
  Node* zmem = zero_memory(); // initially zero memory state
  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);

    // Figure out the store's offset and constant value:
    if (st_off < header_size)             continue; //skip (ignore header)
    if (st->in(MemNode::Memory) != zmem)  continue; //skip (odd store chain)
    int st_size = st->as_Store()->memory_size();
    if (st_off + st_size > size_limit)    break;

    // Record which bytes are touched, whether by constant or not.
    if (!store_constant(inits, num_tiles, st_off, st_size, (jlong) -1))
      continue;                 // skip (strange store size)

    const Type* val = phase->type(st->in(MemNode::ValueIn));
    if (!val->singleton())                continue; //skip (non-con store)
    BasicType type = val->basic_type();

    jlong con = 0;
    switch (type) {
    case T_INT:    con = val->is_int()->get_con();  break;
    case T_LONG:   con = val->is_long()->get_con(); break;
    case T_FLOAT:  con = jint_cast(val->getf());    break;
    case T_DOUBLE: con = jlong_cast(val->getd());   break;
    default:                              continue; //skip (odd store type)
    }

    if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
        st->Opcode() == Op_StoreL) {
      continue;                 // This StoreL is already optimal.
    }

    // Store down the constant.
    store_constant(tiles, num_tiles, st_off, st_size, con);

    intptr_t j = st_off >> LogBytesPerLong;

    if (type == T_INT && st_size == BytesPerInt
        && (st_off &  BytesPerInt) == BytesPerInt) {
      jlong lcon = tiles[j];
      if (!Matcher::isSimpleConstant64(lcon) &&
          st->Opcode() == Op_StoreI) {
        // This StoreI is already optimal by itself.
        jint* intcon = (jint*) &tiles[j];
        intcon[1] = 0;  // undo the store_constant()

        // If the previous store is also optimal by itself, back up and
        // undo the action of the previous loop iteration... if we can.
        // But if we can't, just let the previous half take care of itself.
        st = nodes[j];
        st_off -= BytesPerInt;
        con = intcon[0];
        if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) {
          assert(st_off >= header_size, "still ignoring header");
          assert(get_store_offset(st, phase) == st_off, "must be");
          assert(in(i-1) == zmem, "must be");
          DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn)));
          assert(con == tcon->is_int()->get_con(), "must be");
          // Undo the effects of the previous loop trip, which swallowed st:
          intcon[0] = 0;        // undo store_constant()
          set_req(i-1, st);     // undo set_req(i, zmem)
          nodes[j] = NULL;      // undo nodes[j] = st
          --old_subword;        // undo ++old_subword
        }
        continue;               // This StoreI is already optimal.
      }
    }

    // This store is not needed.
    set_req(i, zmem);
    nodes[j] = st;              // record for the moment
    if (st_size < BytesPerLong) // something has changed
      ++old_subword;            // includes int/float, but who's counting...
    else  ++old_long;
  }

  if ((old_subword + old_long) == 0)
    return;                     // nothing more to do

  //// Pass B: Convert any non-zero tiles into optimal constant stores.
  // Be sure to insert them before overlapping non-constant stores.
  // (E.g., byte[] x = { 1,2,y,4 }  =>  x[int 0] = 0x01020004, x[2]=y.)
  for (int j = 0; j < num_tiles; j++) {
    jlong con  = tiles[j];
    jlong init = inits[j];
    if (con == 0)  continue;
    jint con0,  con1;           // split the constant, address-wise
    jint init0, init1;          // split the init map, address-wise
    { union { jlong con; jint intcon[2]; } u;
      u.con = con;
      con0  = u.intcon[0];
      con1  = u.intcon[1];
      u.con = init;
      init0 = u.intcon[0];
      init1 = u.intcon[1];
    }

    Node* old = nodes[j];
    assert(old != NULL, "need the prior store");
    intptr_t offset = (j * BytesPerLong);

    bool split = !Matcher::isSimpleConstant64(con);

    if (offset < header_size) {
      assert(offset + BytesPerInt >= header_size, "second int counts");
      assert(*(jint*)&tiles[j] == 0, "junk in header");
      split = true;             // only the second word counts
      // Example:  int a[] = { 42 ... }
    } else if (con0 == 0 && init0 == -1) {
      split = true;             // first word is covered by full inits
      // Example:  int a[] = { ... foo(), 42 ... }
    } else if (con1 == 0 && init1 == -1) {
      split = true;             // second word is covered by full inits
      // Example:  int a[] = { ... 42, foo() ... }
    }

    // Here's a case where init0 is neither 0 nor -1:
    //   byte a[] = { ... 0,0,foo(),0,  0,0,0,42 ... }
    // Assuming big-endian memory, init0, init1 are 0x0000FF00, 0x000000FF.
    // In this case the tile is not split; it is (jlong)42.
    // The big tile is stored down, and then the foo() value is inserted.
    // (If there were foo(),foo() instead of foo(),0, init0 would be -1.)

    Node* ctl = old->in(MemNode::Control);
    Node* adr = make_raw_address(offset, phase);
    const TypePtr* atp = TypeRawPtr::BOTTOM;

    // One or two coalesced stores to plop down.
    Node*    st[2];
    intptr_t off[2];
    int  nst = 0;
    if (!split) {
      ++new_long;
      off[nst] = offset;
      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                  phase->longcon(con), T_LONG);
    } else {
      // Omit either if it is a zero.
      if (con0 != 0) {
        ++new_int;
        off[nst]  = offset;
        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                    phase->intcon(con0), T_INT);
      }
      if (con1 != 0) {
        ++new_int;
        offset += BytesPerInt;
        adr = make_raw_address(offset, phase);
        off[nst]  = offset;
        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                    phase->intcon(con1), T_INT);
      }
    }

    // Insert second store first, then the first before the second.
    // Insert each one just before any overlapping non-constant stores.
    while (nst > 0) {
      Node* st1 = st[--nst];
      C->copy_node_notes_to(st1, old);
      st1 = phase->transform(st1);
      offset = off[nst];
      assert(offset >= header_size, "do not smash header");
      int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase);
      guarantee(ins_idx != 0, "must re-insert constant store");
      if (ins_idx < 0)  ins_idx = -ins_idx;  // never overlap
      if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem)
        set_req(--ins_idx, st1);
      else
        ins_req(ins_idx, st1);
    }
  }

  if (PrintCompilation && WizardMode)
    tty->print_cr("Changed %d/%d subword/long constants into %d/%d int/long",
                  old_subword, old_long, new_int, new_long);
  if (C->log() != NULL)
    C->log()->elem("comment that='%d/%d subword/long to %d/%d int/long'",
                   old_subword, old_long, new_int, new_long);

  // Clean up any remaining occurrences of zmem:
  remove_extra_zeroes();
}
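
// Informal end-to-end example (assuming ReduceFieldZeroing and that the
// constant stores were captured by this Initialize): for
//   byte[] a = { 1, 2, 3, 4 };
// the four captured one-byte stores all land in the same tile in Pass A;
// Pass B then replaces them with one 32-bit constant store of the combined
// pattern (0x01020304 on a big-endian target, 0x04030201 on a little-endian
// one), so the array body is initialized with a single memory operation
// instead of four.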

// Explore forward from in(start) to find the first fully initialized
// word, and return its offset.  Skip groups of subword stores which
// together initialize full words.  If in(start) is itself part of a
// fully initialized word, return the offset of in(start).  If there
// are no following full-word stores, or if something is fishy, return
// a negative value.
intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) {
  int       int_map = 0;
  intptr_t  int_map_off = 0;
  const int FULL_MAP = right_n_bits(BytesPerInt);  // the int_map we hope for

  for (uint i = start, limit = req(); i < limit; i++) {
    Node* st = in(i);

    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)  break;  // return conservative answer

    int st_size = st->as_Store()->memory_size();
    if (st_size >= BytesPerInt && (st_off % BytesPerInt) == 0) {
      return st_off;            // we found a complete word init
    }

    // update the map:

    intptr_t this_int_off = align_size_down(st_off, BytesPerInt);
    if (this_int_off != int_map_off) {
      // reset the map:
      int_map = 0;
      int_map_off = this_int_off;
    }

    int subword_off = st_off - this_int_off;
    int_map |= right_n_bits(st_size) << subword_off;
    if ((int_map & FULL_MAP) == FULL_MAP) {
      return this_int_off;      // we found a complete word init
    }

    // Did this store hit or cross the word boundary?
    intptr_t next_int_off = align_size_down(st_off + st_size, BytesPerInt);
    if (next_int_off == this_int_off + BytesPerInt) {
      // We passed the current int, without fully initializing it.
      int_map_off = next_int_off;
      int_map >>= BytesPerInt;
    } else if (next_int_off > this_int_off + BytesPerInt) {
      // We passed the current and next int.
      return this_int_off + BytesPerInt;
    }
  }

  return -1;
}
|
3185 |
||
3186 |
||
// Called when the associated AllocateNode is expanded into CFG.
// At this point, we may perform additional optimizations.
// Linearize the stores by ascending offset, to make memory
// activity as coherent as possible.
Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                                      intptr_t header_size,
                                      Node* size_in_bytes,
                                      PhaseGVN* phase) {
  assert(!is_complete(), "not already complete");
  assert(stores_are_sane(phase), "");
  assert(allocation() != NULL, "must be present");

  remove_extra_zeroes();

  if (ReduceFieldZeroing || ReduceBulkZeroing)
    // reduce instruction count for common initialization patterns
    coalesce_subword_stores(header_size, size_in_bytes, phase);

  Node* zmem = zero_memory();   // initially zero memory state
  Node* inits = zmem;           // accumulating a linearized chain of inits
#ifdef ASSERT
  intptr_t first_offset  = allocation()->minimum_header_size();
  intptr_t last_init_off = first_offset;  // previous init offset
  intptr_t last_init_end = first_offset;  // previous init offset+size
  intptr_t last_tile_end = first_offset;  // previous tile offset+size
#endif
  intptr_t zeroes_done = header_size;

  bool do_zeroing = true;       // we might give up if inits are very sparse
  int  big_init_gaps = 0;       // how many large gaps have we seen?

  if (ZeroTLAB)  do_zeroing = false;
  if (!ReduceFieldZeroing && !ReduceBulkZeroing)  do_zeroing = false;
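  // With ZeroTLAB the newly allocated TLAB memory is already zeroed, so no
  // explicit zeroing is needed here.  Without ReduceFieldZeroing or
  // ReduceBulkZeroing the incremental zeroing in the loop below is skipped,
  // and any remaining bytes are covered by the single bulk clear at the end
  // of this method.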
|
  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)
      break;                    // unknown junk in the inits
    if (st->in(MemNode::Memory) != zmem)
      break;                    // complicated store chains somehow in list

    int st_size = st->as_Store()->memory_size();
    intptr_t next_init_off = st_off + st_size;

    if (do_zeroing && zeroes_done < next_init_off) {
      // See if this store needs a zero before it or under it.
      intptr_t zeroes_needed = st_off;

      if (st_size < BytesPerInt) {
        // Look for subword stores which only partially initialize words.
        // If we find some, we must lay down some word-level zeroes first,
        // underneath the subword stores.
        //
        // Examples:
        //   byte[] a = { p,q,r,s }  =>  a[0]=p,a[1]=q,a[2]=r,a[3]=s
        //   byte[] a = { x,y,0,0 }  =>  a[0..3] = 0, a[0]=x,a[1]=y
        //   byte[] a = { 0,0,z,0 }  =>  a[0..3] = 0, a[2]=z
        //
        // Note:  coalesce_subword_stores may have already done this,
        // if it was prompted by constant non-zero subword initializers.
        // But this case can still arise with non-constant stores.

        intptr_t next_full_store = find_next_fullword_store(i, phase);

        // In the examples above:
        //   in(i)          p   q   r   s     x   y     z
        //   st_off        12  13  14  15    12  13    14
        //   st_size        1   1   1   1     1   1     1
        //   next_full_s.  12  16  16  16    16  16    16
        //   z's_done      12  16  16  16    12  16    12
        //   z's_needed    12  16  16  16    16  16    16
        //   zsize          0   0   0   0     4   0     4
        if (next_full_store < 0) {
          // Conservative tack:  Zero to end of current word.
          zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
        } else {
          // Zero to beginning of next fully initialized word.
          // Or, don't zero at all, if we are already in that word.
          assert(next_full_store >= zeroes_needed, "must go forward");
          assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
          zeroes_needed = next_full_store;
        }
      }

      if (zeroes_needed > zeroes_done) {
        intptr_t zsize = zeroes_needed - zeroes_done;
        // Do some incremental zeroing on rawmem, in parallel with inits.
        zeroes_done = align_size_down(zeroes_done, BytesPerInt);
        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                              zeroes_done, zeroes_needed,
                                              phase);
        zeroes_done = zeroes_needed;
        if (zsize > Matcher::init_array_short_size && ++big_init_gaps > 2)
          do_zeroing = false;   // leave the hole, next time
      }
    }

    // Collect the store and move on:
    st->set_req(MemNode::Memory, inits);
    inits = st;                 // put it on the linearized chain
    set_req(i, zmem);           // unhook from previous position

    if (zeroes_done == st_off)
      zeroes_done = next_init_off;

    assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");

#ifdef ASSERT
    // Various order invariants.  Weaker than stores_are_sane because
    // a large constant tile can be filled in by smaller non-constant stores.
    assert(st_off >= last_init_off, "inits do not reverse");
    last_init_off = st_off;
    const Type* val = NULL;
    if (st_size >= BytesPerInt &&
        (val = phase->type(st->in(MemNode::ValueIn)))->singleton() &&
        (int)val->basic_type() < (int)T_OBJECT) {
      assert(st_off >= last_tile_end, "tiles do not overlap");
      assert(st_off >= last_init_end, "tiles do not overwrite inits");
      last_tile_end = MAX2(last_tile_end, next_init_off);
    } else {
      intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong);
      assert(st_tile_end >= last_tile_end, "inits stay with tiles");
      assert(st_off      >= last_init_end, "inits do not overlap");
      last_init_end = next_init_off;  // it's a non-tile
    }
#endif //ASSERT
  }

  remove_extra_zeroes();        // clear out all the zmems left over
  add_req(inits);

  if (!ZeroTLAB) {
    // If anything remains to be zeroed, zero it all now.
    zeroes_done = align_size_down(zeroes_done, BytesPerInt);
    // if it is the last unused 4 bytes of an instance, forget about it
    intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
    if (zeroes_done + BytesPerLong >= size_limit) {
      assert(allocation() != NULL, "");
      Node* klass_node = allocation()->in(AllocateNode::KlassNode);
      ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
      if (zeroes_done == k->layout_helper())
        zeroes_done = size_limit;
    }
    if (zeroes_done < size_limit) {
      rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                            zeroes_done, size_in_bytes, phase);
    }
  }

  set_complete(phase);
  return rawmem;
}

#ifdef ASSERT
bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
  if (is_complete())
    return true;                // stores could be anything at this point
  assert(allocation() != NULL, "must be present");
  intptr_t last_off = allocation()->minimum_header_size();
  for (uint i = InitializeNode::RawStores; i < req(); i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)  continue;  // ignore dead garbage
    if (last_off > st_off) {
      tty->print_cr("*** bad store offset at %d: %d > %d", i, last_off, st_off);
      this->dump(2);
      assert(false, "ascending store offsets");
      return false;
    }
    last_off = st_off + st->as_Store()->memory_size();
  }
  return true;
}
#endif //ASSERT



//============================MergeMemNode=====================================
//
// SEMANTICS OF MEMORY MERGES:  A MergeMem is a memory state assembled from several
// contributing store or call operations.  Each contributor provides the memory
// state for a particular "alias type" (see Compile::alias_type).  For example,
// if a MergeMem has an input X for alias category #6, then any memory reference
// to alias category #6 may use X as its memory state input, as an exact equivalent
// to using the MergeMem as a whole.
//   Load<6>( MergeMem(<6>: X, ...), p ) <==> Load<6>(X,p)
//
// (Here, the <N> notation gives the index of the relevant adr_type.)
//
// In one special case (and more cases in the future), alias categories overlap.
// The special alias category "Bot" (Compile::AliasIdxBot) includes all memory
// states.  Therefore, if a MergeMem has only one contributing input W for Bot,
// it is exactly equivalent to that state W:
//   MergeMem(<Bot>: W) <==> W
//
// Usually, the merge has more than one input.  In that case, where inputs
// overlap (i.e., one is Bot), the narrower alias type determines the memory
// state for that type, and the wider alias type (Bot) fills in everywhere else:
//   Load<5>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<5>(W,p)
//   Load<6>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<6>(X,p)
//
// A merge can take a "wide" memory state as one of its narrow inputs.
// This simply means that the merge observes only the relevant parts of
// the wide input.  That is, wide memory states arriving at narrow merge inputs
// are implicitly "filtered" or "sliced" as necessary.  (This is rare.)
//
// These rules imply that MergeMem nodes may cascade (via their <Bot> links),
// and that memory slices "leak through":
//   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y)) <==> MergeMem(<Bot>: W, <7>: Y)
//
// But, in such a cascade, repeated memory slices can "block the leak":
//   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y), <7>: Y') <==> MergeMem(<Bot>: W, <7>: Y')
//
// In the last example, Y is not part of the combined memory state of the
// outermost MergeMem.  The system must, of course, prevent unschedulable
// memory states from arising, so you can be sure that the state Y is somehow
// a precursor to state Y'.
//
//
// REPRESENTATION OF MEMORY MERGES: The indexes used to address the Node::in array
// of each MergeMemNode are exactly the numerical alias indexes, including
// but not limited to AliasIdxTop, AliasIdxBot, and AliasIdxRaw.  The functions
// Compile::alias_type (and kin) produce and manage these indexes.
//
// By convention, the value of in(AliasIdxTop) (i.e., in(1)) is always the top node.
// (Note that this provides quick access to the top node inside MergeMem methods,
// without the need to reach out via TLS to Compile::current.)
//
// As a consequence of what was just described, a MergeMem that represents a full
// memory state has an edge in(AliasIdxBot) which is a "wide" memory state,
// containing all alias categories.
//
// MergeMem nodes never (?) have control inputs, so in(0) is NULL.
//
// All other edges in(N) (including in(AliasIdxRaw), which is in(3)) are either
// a memory state for the alias type <N>, or else the top node, meaning that
// there is no particular input for that alias type.  Note that the length of
// a MergeMem is variable, and may be extended at any time to accommodate new
// memory states at larger alias indexes.  When merges grow, they are of course
// filled with "top" in the unused in() positions.
//
// This use of top is named "empty_memory()", or "empty_mem" (no-memory) as a variable.
// (Top was chosen because it works smoothly with passes like GCM.)
//
// For convenience, we hardwire the alias index for TypeRawPtr::BOTTOM.  (It is
// the type of random VM bits like TLS references.)  Since it is always the
// first non-Bot memory slice, some low-level loops use it to initialize an
// index variable:  for (i = AliasIdxRaw; i < req(); i++).
//
//
// ACCESSORS:  There is a special accessor MergeMemNode::base_memory which returns
// the distinguished "wide" state.  The accessor MergeMemNode::memory_at(N) returns
// the memory state for alias type <N>, or (if there is no particular slice at <N>)
// it returns the base memory.  To prevent bugs, memory_at does not accept <Top>
// or <Bot> indexes.  The iterator MergeMemStream provides robust iteration over
// MergeMem nodes or pairs of such nodes, ensuring that the non-top edges are visited.
//
// %%%% We may get rid of base_memory as a separate accessor at some point; it isn't
// really that different from the other memory inputs.  An abbreviation called
// "bot_memory()" for "memory_at(AliasIdxBot)" would keep code tidy.
//
//
// PARTIAL MEMORY STATES:  During optimization, MergeMem nodes may arise that represent
// partial memory states.  When a Phi splits through a MergeMem, the copy of the Phi
// that "emerges through" the base memory will be marked as excluding the alias types
// of the other (narrow-memory) copies which "emerged through" the narrow edges:
//
//   Phi<Bot>(U, MergeMem(<Bot>: W, <8>: Y))
//     ==Ideal=>  MergeMem(<Bot>: Phi<Bot-8>(U, W), Phi<8>(U, Y))
//
// This strange "subtraction" effect is necessary to ensure IGVN convergence.
// (It is currently unimplemented.)  As you can see, the resulting merge is
// actually a disjoint union of memory states, rather than an overlay.
//

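// For reference, the edge layout of a full MergeMem is therefore roughly:
//   in(0)                    -- NULL (no control input)
//   in(AliasIdxTop) == in(1) -- the top sentinel, "empty_memory()"
//   in(AliasIdxBot) == in(2) -- the wide base memory
//   in(AliasIdxRaw) == in(3) -- the raw-pointer memory slice
//   in(4) and up             -- narrow slices, or top where absent
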
//------------------------------MergeMemNode-----------------------------------
Node* MergeMemNode::make_empty_memory() {
  Node* empty_memory = (Node*) Compile::current()->top();
  assert(empty_memory->is_top(), "correct sentinel identity");
  return empty_memory;
}

MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) {
  init_class_id(Class_MergeMem);
  // all inputs are nullified in Node::Node(int)
  // set_input(0, NULL);  // no control input

  // Initialize the edges uniformly to top, for starters.
  Node* empty_mem = make_empty_memory();
  for (uint i = Compile::AliasIdxTop; i < req(); i++) {
    init_req(i,empty_mem);
  }
  assert(empty_memory() == empty_mem, "");

  if( new_base != NULL && new_base->is_MergeMem() ) {
    MergeMemNode* mdef = new_base->as_MergeMem();
    assert(mdef->empty_memory() == empty_mem, "consistent sentinels");
    for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) {
      mms.set_memory(mms.memory2());
    }
    assert(base_memory() == mdef->base_memory(), "");
  } else {
    set_base_memory(new_base);
  }
}

// Make a new, untransformed MergeMem with the same base as 'mem'.
// If mem is itself a MergeMem, populate the result with the same edges.
MergeMemNode* MergeMemNode::make(Compile* C, Node* mem) {
  return new(C, 1+Compile::AliasIdxRaw) MergeMemNode(mem);
}

//------------------------------cmp--------------------------------------------
uint MergeMemNode::hash() const { return NO_HASH; }
uint MergeMemNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------Identity---------------------------------------
Node* MergeMemNode::Identity(PhaseTransform *phase) {
  // Identity if this merge point does not record any interesting memory
  // disambiguations.
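  // For example, MergeMem(<Bot>: W, <6>: W) records nothing beyond W itself
  // and is replaced by W.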
|
  Node* base_mem = base_memory();
  Node* empty_mem = empty_memory();
  if (base_mem != empty_mem) {  // Memory path is not dead?
    for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
      Node* mem = in(i);
      if (mem != empty_mem && mem != base_mem) {
        return this;            // Many memory splits; no change
      }
    }
  }
  return base_mem;              // No memory splits; ID on the one true input
}

//------------------------------Ideal------------------------------------------
// This method is invoked recursively on chains of MergeMem nodes
Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Remove chain'd MergeMems
  //
  // This is delicate, because each "in(i)" (i >= Raw) is interpreted
  // relative to the "in(Bot)".  Since we are patching both at the same time,
  // we have to be careful to read each "in(i)" relative to the old "in(Bot)",
  // but rewrite each "in(i)" relative to the new "in(Bot)".
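  // For example, MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y), <8>: Z) is
  // flattened here: the base becomes W and slice <7> picks up Y directly,
  // yielding MergeMem(<Bot>: W, <7>: Y, <8>: Z).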
|
  Node *progress = NULL;


  Node* old_base = base_memory();
  Node* empty_mem = empty_memory();
  if (old_base == empty_mem)
    return NULL; // Dead memory path.

  MergeMemNode* old_mbase;
  if (old_base != NULL && old_base->is_MergeMem())
    old_mbase = old_base->as_MergeMem();
  else
    old_mbase = NULL;
  Node* new_base = old_base;

  // simplify stacked MergeMems in base memory
  if (old_mbase)  new_base = old_mbase->base_memory();

  // the base memory might contribute new slices beyond my req()
  if (old_mbase)  grow_to_match(old_mbase);

  // Look carefully at the base node if it is a phi.
  PhiNode* phi_base;
  if (new_base != NULL && new_base->is_Phi())
    phi_base = new_base->as_Phi();
  else
    phi_base = NULL;

  Node*    phi_reg = NULL;
  uint     phi_len = (uint)-1;
  if (phi_base != NULL && !phi_base->is_copy()) {
    // do not examine phi if degraded to a copy
    phi_reg = phi_base->region();
    phi_len = phi_base->req();
    // see if the phi is unfinished
    for (uint i = 1; i < phi_len; i++) {
      if (phi_base->in(i) == NULL) {
        // incomplete phi; do not look at it yet!
        phi_reg = NULL;
        phi_len = (uint)-1;
        break;
      }
    }
  }

  // Note:  We do not call verify_sparse on entry, because inputs
  // can normalize to the base_memory via subsume_node or similar
  // mechanisms.  This method repairs that damage.

  assert(!old_mbase || old_mbase->is_empty_memory(empty_mem), "consistent sentinels");

  // Look at each slice.
  for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
    Node* old_in = in(i);
    // calculate the old memory value
    Node* old_mem = old_in;
    if (old_mem == empty_mem)  old_mem = old_base;
    assert(old_mem == memory_at(i), "");

    // maybe update (reslice) the old memory value

    // simplify stacked MergeMems
    Node* new_mem = old_mem;
    MergeMemNode* old_mmem;
    if (old_mem != NULL && old_mem->is_MergeMem())
      old_mmem = old_mem->as_MergeMem();
    else
      old_mmem = NULL;
    if (old_mmem == this) {
      // This can happen if loops break up and safepoints disappear.
      // A merge of BotPtr (default) with a RawPtr memory derived from a
      // safepoint can be rewritten to a merge of the same BotPtr with
      // the BotPtr phi coming into the loop.  If that phi disappears
      // also, we can end up with a self-loop of the mergemem.
      // In general, if loops degenerate and memory effects disappear,
      // a mergemem can be left looking at itself.  This simply means
      // that the mergemem's default should be used, since there is
      // no longer any apparent effect on this slice.
      // Note: If a memory slice is a MergeMem cycle, it is unreachable
      //       from start.  Update the input to TOP.
      new_mem = (new_base == this || new_base == empty_mem)? empty_mem : new_base;
    }
    else if (old_mmem != NULL) {
      new_mem = old_mmem->memory_at(i);
    }
    // else preceding memory was not a MergeMem

    // replace equivalent phis (unfortunately, they do not GVN together)
    if (new_mem != NULL && new_mem != new_base &&
        new_mem->req() == phi_len && new_mem->in(0) == phi_reg) {
      if (new_mem->is_Phi()) {
        PhiNode* phi_mem = new_mem->as_Phi();
        for (uint i = 1; i < phi_len; i++) {
          if (phi_base->in(i) != phi_mem->in(i)) {
            phi_mem = NULL;
            break;
          }
        }
        if (phi_mem != NULL) {
          // equivalent phi nodes; revert to the def
          new_mem = new_base;
        }
      }
    }

    // maybe store down a new value
    Node* new_in = new_mem;
    if (new_in == new_base)  new_in = empty_mem;

    if (new_in != old_in) {
      // Warning:  Do not combine this "if" with the previous "if"
      // A memory slice might have to be rewritten even if it is semantically
      // unchanged, if the base_memory value has changed.
      set_req(i, new_in);
      progress = this;          // Report progress
    }
  }

  if (new_base != old_base) {
    set_req(Compile::AliasIdxBot, new_base);
    // Don't use set_base_memory(new_base), because we need to update du.
    assert(base_memory() == new_base, "");
    progress = this;
  }

  if( base_memory() == this ) {
    // a self cycle indicates this memory path is dead
    set_req(Compile::AliasIdxBot, empty_mem);
  }

  // Resolve external cycles by calling Ideal on a MergeMem base_memory
  // Recursion must occur after the self cycle check above
  if( base_memory()->is_MergeMem() ) {
    MergeMemNode *new_mbase = base_memory()->as_MergeMem();
    Node *m = phase->transform(new_mbase);  // Rollup any cycles
    if( m != NULL && (m->is_top() ||
        m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem) ) {
      // propagate rollup of dead cycle to self
      set_req(Compile::AliasIdxBot, empty_mem);
    }
  }

  if( base_memory() == empty_mem ) {
    progress = this;
    // Cut inputs during Parse phase only.
    // During Optimize phase a dead MergeMem node will be subsumed by Top.
    if( !can_reshape ) {
      for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
        if( in(i) != empty_mem ) { set_req(i, empty_mem); }
      }
    }
  }

  if( !progress && base_memory()->is_Phi() && can_reshape ) {
    // Check if PhiNode::Ideal's "Split phis through memory merges"
    // transform should be attempted. Look for this->phi->this cycle.
    uint merge_width = req();
    if (merge_width > Compile::AliasIdxRaw) {
      PhiNode* phi = base_memory()->as_Phi();
      for( uint i = 1; i < phi->req(); ++i ) {// For all paths in
        if (phi->in(i) == this) {
          phase->is_IterGVN()->_worklist.push(phi);
          break;
        }
      }
    }
  }

  assert(progress || verify_sparse(), "please, no dups of base");
  return progress;
}

//-------------------------set_base_memory-------------------------------------
void MergeMemNode::set_base_memory(Node *new_base) {
  Node* empty_mem = empty_memory();
  set_req(Compile::AliasIdxBot, new_base);
  assert(memory_at(req()) == new_base, "must set default memory");
  // Clear out other occurrences of new_base:
  if (new_base != empty_mem) {
    for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
      if (in(i) == new_base)  set_req(i, empty_mem);
    }
  }
}

//------------------------------out_RegMask------------------------------------
const RegMask &MergeMemNode::out_RegMask() const {
  return RegMask::Empty;
}

//------------------------------dump_spec--------------------------------------
#ifndef PRODUCT
void MergeMemNode::dump_spec(outputStream *st) const {
  st->print(" {");
  Node* base_mem = base_memory();
  for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) {
    Node* mem = memory_at(i);
    if (mem == base_mem) { st->print(" -"); continue; }
    st->print( " N%d:", mem->_idx );
    Compile::current()->get_adr_type(i)->dump_on(st);
  }
  st->print(" }");
}
#endif // !PRODUCT


#ifdef ASSERT
static bool might_be_same(Node* a, Node* b) {
  if (a == b)  return true;
  if (!(a->is_Phi() || b->is_Phi()))  return false;
  // phis shift around during optimization
  return true;  // pretty stupid...
}

// verify a narrow slice (either incoming or outgoing)
static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
  if (!VerifyAliases)       return;  // don't bother to verify unless requested
  if (is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())      return;  // muzzle asserts when printing
  assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel");
  assert(n != NULL, "");
  // Elide intervening MergeMem's
  while (n->is_MergeMem()) {
    n = n->as_MergeMem()->memory_at(alias_idx);
  }
  Compile* C = Compile::current();
  const TypePtr* n_adr_type = n->adr_type();
  if (n == m->empty_memory()) {
    // Implicit copy of base_memory()
  } else if (n_adr_type != TypePtr::BOTTOM) {
    assert(n_adr_type != NULL, "new memory must have a well-defined adr_type");
    assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice");
  } else {
    // A few places like make_runtime_call "know" that VM calls are narrow,
    // and can be used to update only the VM bits stored as TypeRawPtr::BOTTOM.
    bool expected_wide_mem = false;
    if (n == m->base_memory()) {
      expected_wide_mem = true;
    } else if (alias_idx == Compile::AliasIdxRaw ||
               n == m->memory_at(Compile::AliasIdxRaw)) {
      expected_wide_mem = true;
    } else if (!C->alias_type(alias_idx)->is_rewritable()) {
      // memory can "leak through" calls on channels that
      // are write-once.  Allow this also.
      expected_wide_mem = true;
    }
    assert(expected_wide_mem, "expected narrow slice replacement");
  }
}
#else // !ASSERT
#define verify_memory_slice(m,i,n) (0)  // PRODUCT version is no-op
#endif


//-----------------------------memory_at---------------------------------------
Node* MergeMemNode::memory_at(uint alias_idx) const {
  assert(alias_idx >= Compile::AliasIdxRaw ||
         alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
         "must avoid base_memory and AliasIdxTop");

  // Otherwise, it is a narrow slice.
  Node* n = alias_idx < req() ? in(alias_idx) : empty_memory();
  Compile *C = Compile::current();
  if (is_empty_memory(n)) {
    // the array is sparse; empty slots are the "top" node
    n = base_memory();
    assert(Node::in_dump()
           || n == NULL || n->bottom_type() == Type::TOP
           || n->adr_type() == TypePtr::BOTTOM
           || n->adr_type() == TypeRawPtr::BOTTOM
           || Compile::current()->AliasLevel() == 0,
           "must be a wide memory");
    // AliasLevel == 0 if we are organizing the memory states manually.
    // See verify_memory_slice for comments on TypeRawPtr::BOTTOM.
  } else {
    // make sure the stored slice is sane
    #ifdef ASSERT
    if (is_error_reported() || Node::in_dump()) {
    } else if (might_be_same(n, base_memory())) {
      // Give it a pass:  It is a mostly harmless repetition of the base.
      // This can arise normally from node subsumption during optimization.
    } else {
      verify_memory_slice(this, alias_idx, n);
    }
    #endif
  }
  return n;
}

//---------------------------set_memory_at-------------------------------------
void MergeMemNode::set_memory_at(uint alias_idx, Node *n) {
  verify_memory_slice(this, alias_idx, n);
  Node* empty_mem = empty_memory();
  if (n == base_memory())  n = empty_mem;  // collapse default
  uint need_req = alias_idx+1;
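  // The edge array grows lazily: if alias_idx is beyond the current req(),
  // empty_mem (top) slots are appended below until in(alias_idx) exists.
  // For example, with req() == 5 and alias_idx == 9, five empty slots are
  // added and then in(9) is set.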
|
  if (req() < need_req) {
    if (n == empty_mem)  return;  // already the default, so do not grow me
    // grow the sparse array
    do {
      add_req(empty_mem);
    } while (req() < need_req);
  }
  set_req( alias_idx, n );
}



//--------------------------iteration_setup------------------------------------
void MergeMemNode::iteration_setup(const MergeMemNode* other) {
  if (other != NULL) {
    grow_to_match(other);
    // invariant:  the finite support of mm2 is within mm->req()
    #ifdef ASSERT
    for (uint i = req(); i < other->req(); i++) {
      assert(other->is_empty_memory(other->in(i)), "slice left uncovered");
    }
    #endif
  }
  // Replace spurious copies of base_memory by top.
  Node* base_mem = base_memory();
  if (base_mem != NULL && !base_mem->is_top()) {
    for (uint i = Compile::AliasIdxBot+1, imax = req(); i < imax; i++) {
      if (in(i) == base_mem)
        set_req(i, empty_memory());
    }
  }
}

//---------------------------grow_to_match-------------------------------------
void MergeMemNode::grow_to_match(const MergeMemNode* other) {
  Node* empty_mem = empty_memory();
  assert(other->is_empty_memory(empty_mem), "consistent sentinels");
  // look for the finite support of the other memory
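  // For example, if other->req() == 12 but its highest non-empty slice is
  // in(9), this node only grows to req() == 10; trailing empty slices are
  // not copied.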
|
  for (uint i = other->req(); --i >= req(); ) {
    if (other->in(i) != empty_mem) {
      uint new_len = i+1;
      while (req() < new_len)  add_req(empty_mem);
      break;
    }
  }
}

//---------------------------verify_sparse-------------------------------------
#ifndef PRODUCT
bool MergeMemNode::verify_sparse() const {
  assert(is_empty_memory(make_empty_memory()), "sane sentinel");
  Node* base_mem = base_memory();
  // The following can happen in degenerate cases, since empty==top.
  if (is_empty_memory(base_mem))  return true;
  for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
    assert(in(i) != NULL, "sane slice");
    if (in(i) == base_mem)  return false;  // should have been the sentinel value!
  }
  return true;
}

bool MergeMemStream::match_memory(Node* mem, const MergeMemNode* mm, int idx) {
  Node* n;
  n = mm->in(idx);
  if (mem == n)  return true;  // might be empty_memory()
  n = (idx == Compile::AliasIdxBot)? mm->base_memory(): mm->memory_at(idx);
  if (mem == n)  return true;
  while (n->is_Phi() && (n = n->as_Phi()->is_copy()) != NULL) {
    if (mem == n)  return true;
    if (n == NULL)  break;
  }
  return false;
}
#endif // !PRODUCT