/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ZGC
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st);

//=============================================================================
uint MemNode::size_of() const { return sizeof(*this); }

const TypePtr *MemNode::adr_type() const {
  Node* adr = in(Address);
  if (adr == NULL) return NULL; // node is dead
  const TypePtr* cross_check = NULL;
  DEBUG_ONLY(cross_check = _adr_type);
  return calculate_adr_type(adr->bottom_type(), cross_check);
}

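// Returns true when the address expression may refer to raw memory, i.e. its
// bottom type has a RawPtr or AnyPtr base, in which case no precise alias
// information is available for it.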
bool MemNode::check_if_adr_maybe_raw(Node* adr) {
  if (adr != NULL) {
    if (adr->bottom_type()->base() == Type::RawPtr || adr->bottom_type()->base() == Type::AnyPtr) {
      return true;
    }
  }
  return false;
}

#ifndef PRODUCT
void MemNode::dump_spec(outputStream *st) const {
  if (in(Address) == NULL) return; // node is dead
#ifndef ASSERT
  // fake the missing field
  const TypePtr* _adr_type = NULL;
  if (in(Address) != NULL)
    _adr_type = in(Address)->bottom_type()->isa_ptr();
#endif
  dump_adr_type(this, _adr_type, st);

  Compile* C = Compile::current();
  if (C->alias_type(_adr_type)->is_volatile()) {
    st->print(" Volatile!");
  }
  if (_unaligned_access) {
    st->print(" unaligned");
  }
  if (_mismatched_access) {
    st->print(" mismatched");
  }
}

void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
  st->print(" @");
  if (adr_type == NULL) {
    st->print("NULL");
  } else {
    adr_type->dump_on(st);
    Compile* C = Compile::current();
    Compile::AliasType* atp = NULL;
    if (C->have_alias_type(adr_type)) atp = C->alias_type(adr_type);
    if (atp == NULL)
      st->print(", idx=?\?;");
    else if (atp->index() == Compile::AliasIdxBot)
      st->print(", idx=Bot;");
    else if (atp->index() == Compile::AliasIdxTop)
      st->print(", idx=Top;");
    else if (atp->index() == Compile::AliasIdxRaw)
      st->print(", idx=Raw;");
    else {
      ciField* field = atp->field();
      if (field) {
        st->print(", name=");
        field->print_name_on(st);
      }
      st->print(", idx=%d;", atp->index());
    }
  }
}

extern void print_alias_types();

#endif

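// Walk the memory chain upward for an access to a known-instance field (or a
// load from a boxed value), skipping over nodes that provably do not affect
// this memory slice: calls that cannot modify it, initializations of
// unrelated allocations, membars not guarding a relevant arraycopy, and
// ClearArray/MergeMem nodes. The walk stops at one of the sentinels: the
// method's start memory or the allocation that owns this instance.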
Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
  assert((t_oop != NULL), "sanity");
  bool is_instance = t_oop->is_known_instance_field();
  bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
                             (load != NULL) && load->is_Load() &&
                             (phase->is_IterGVN() != NULL);
  if (!(is_instance || is_boxed_value_load))
    return mchain;  // don't try to optimize non-instance types
  uint instance_id = t_oop->instance_id();
  Node *start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = mchain;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        // ArrayCopyNodes processed here as well
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(t_oop, phase)) { // returns false for instances
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if ((alloc == NULL) || (alloc->_idx == instance_id)) {
          break;
        }
        if (is_instance) {
          result = proj_in->in(TypeFunc::Memory);
        } else if (is_boxed_value_load) {
          Node* klass = alloc->in(AllocateNode::KlassNode);
          const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
          if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
            result = proj_in->in(TypeFunc::Memory); // unrelated allocation
          }
        }
      } else if (proj_in->is_MemBar()) {
        ArrayCopyNode* ac = NULL;
        if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
          break;
        }
        result = proj_in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (result->is_ClearArray()) {
      if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
        // Cannot bypass the initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated the 'result' value).
    } else if (result->is_MergeMem()) {
      result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
    }
  }
  return result;
}

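// Entry point for memory-chain optimization on an arbitrary address type.
// For oop addresses this first applies optimize_simple_memory_chain(); if
// the result is a wide memory Phi for a known-instance field (and we are in
// IGVN), the Phi is split so the instance gets its own narrow memory slice.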
Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
  const TypeOopPtr* t_oop = t_adr->isa_oopptr();
  if (t_oop == NULL)
    return mchain;  // don't try to optimize non-oop types
  Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
  bool is_instance = t_oop->is_known_instance_field();
  PhaseIterGVN *igvn = phase->is_IterGVN();
  if (is_instance && igvn != NULL && result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
        (t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
         t->is_oopptr()->cast_to_exactness(true)
           ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
           ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop)) {
      // clone the Phi with our address type
      result = mphi->split_out_instance(t_adr, igvn);
    } else {
      assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
    }
  }
  return result;
}

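// Select from a MergeMem the memory slice that corresponds to the alias
// index of 'tp'. In debug builds, 'adr_check' cross-checks that the type is
// consistent with the alias index assigned during graph construction.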
static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
  uint alias_idx = phase->C->get_alias_index(tp);
  Node *mem = mmem;
#ifdef ASSERT
  {
    // Check that current type is consistent with the alias index used during graph construction
    assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
    bool consistent = adr_check == NULL || adr_check->empty() ||
                      phase->C->must_alias(adr_check, alias_idx);
    // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
    if( !consistent && adr_check != NULL && !adr_check->empty() &&
        tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
        adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
        ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
          adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
          adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
      // don't assert if it is dead code.
      consistent = true;
    }
    if( !consistent ) {
      st->print("alias_idx==%d, adr_check==", alias_idx);
      if( adr_check == NULL ) {
        st->print("NULL");
      } else {
        adr_check->dump();
      }
      st->cr();
      print_alias_types();
      assert(consistent, "adr_check must match alias idx");
    }
  }
#endif
  // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet.  Do not do any
  // alias stuff with it any time soon.
  const TypeOopPtr *toop = tp->isa_oopptr();
  if( tp->base() != Type::AnyPtr &&
      !(toop &&
        toop->klass() != NULL &&
        toop->klass()->is_java_lang_Object() &&
        toop->offset() == Type::OffsetBot) ) {
    // compress paths and change unreachable cycles to TOP
    // If not, we can update the input infinitely along a MergeMem cycle
    // Equivalent code in PhiNode::Ideal
    Node* m = phase->transform(mmem);
    // If transformed to a MergeMem, get the desired slice
    // Otherwise the returned node represents memory for every slice
    mem = (m->is_MergeMem()) ? m->as_MergeMem()->memory_at(alias_idx) : m;
    // Update input if it is progress over what we have now
  }
  return mem;
}

//--------------------------Ideal_common---------------------------------------
// Look for degenerate control and memory inputs.  Bypass MergeMem inputs.
// Unhook non-raw memories from complete (macro-expanded) initializations.
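// Return convention: NULL means no progress (let the subclass continue),
// 'this' means the graph was changed, and NodeSentinel tells the caller to
// return NULL (the transformation is delayed or the node is dead).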
Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
  // If our control input is a dead region, kill all below the region
  Node *ctl = in(MemNode::Control);
  if (ctl && remove_dead_region(phase, can_reshape))
    return this;
  ctl = in(MemNode::Control);
  // Don't bother trying to transform a dead node
  if (ctl && ctl->is_top())  return NodeSentinel;

  PhaseIterGVN *igvn = phase->is_IterGVN();
  // Wait if control on the worklist.
  if (ctl && can_reshape && igvn != NULL) {
    Node* bol = NULL;
    Node* cmp = NULL;
    if (ctl->in(0)->is_If()) {
      assert(ctl->is_IfTrue() || ctl->is_IfFalse(), "sanity");
      bol = ctl->in(0)->in(1);
      if (bol->is_Bool())
        cmp = ctl->in(0)->in(1)->in(1);
    }
    if (igvn->_worklist.member(ctl) ||
        (bol != NULL && igvn->_worklist.member(bol)) ||
        (cmp != NULL && igvn->_worklist.member(cmp)) ) {
      // This control path may be dead.
      // Delay this memory node transformation until the control is processed.
      phase->is_IterGVN()->_worklist.push(this);
      return NodeSentinel; // caller will return NULL
    }
  }
  // Ignore if memory is dead, or self-loop
  Node *mem = in(MemNode::Memory);
  if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
  assert(mem != this, "dead loop in MemNode::Ideal");

  if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
    // This memory slice may be dead.
    // Delay this mem node transformation until the memory is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }

  Node *address = in(MemNode::Address);
  const Type *t_adr = phase->type(address);
  if (t_adr == Type::TOP) return NodeSentinel; // caller will return NULL

  if (can_reshape && igvn != NULL &&
      (igvn->_worklist.member(address) ||
       (igvn->_worklist.size() > 0 && t_adr != adr_type())) ) {
    // The address's base and type may change when the address is processed.
    // Delay this mem node transformation until the address is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }

  // Do NOT remove or optimize the next lines: ensure a new alias index
  // is allocated for an oop pointer type before Escape Analysis.
  // Note: C++ will not remove it since the call has side effect.
  if (t_adr->isa_oopptr()) {
    int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
  }

  Node* base = NULL;
  if (address->is_AddP()) {
    base = address->in(AddPNode::Base);
  }
  if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
      !t_adr->isa_rawptr()) {
    // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
    // Skip this node optimization if its address has TOP base.
    return NodeSentinel; // caller will return NULL
  }

  // Avoid independent memory operations
  Node* old_mem = mem;

  // The code which unhooks non-raw memories from complete (macro-expanded)
  // initializations was removed. After macro-expansion all stores caught
  // by an Initialize node become raw stores, and there is no information
  // about which memory slices they modify. So it is unsafe to move any
  // memory operation above these stores. Also, in most cases hooked non-raw
  // memories were already unhooked using information from
  // detect_ptr_independence() and find_previous_store().

  if (mem->is_MergeMem()) {
    MergeMemNode* mmem = mem->as_MergeMem();
    const TypePtr *tp = t_adr->is_ptr();

    mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty);
  }

  if (mem != old_mem) {
    set_req(MemNode::Memory, mem);
    if (can_reshape && old_mem->outcnt() == 0 && igvn != NULL) {
      igvn->_worklist.push(old_mem);
    }
    if (phase->type(mem) == Type::TOP) return NodeSentinel;
    return this;
  }

  // let the subclass continue analyzing...
  return NULL;
}

// Helper function for proving some simple control dominations.
// Attempt to prove that all control inputs of 'dom' dominate 'sub'.
// Already assumes that 'dom' is available at 'sub', and that 'sub'
// is not a constant (dominated by the method's StartNode).
// Used by MemNode::find_previous_store to prove that the
// control input of a memory operation predates (dominates)
// an allocation it wants to look past.
bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
  if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top())
    return false; // Conservative answer for dead code

  // Check 'dom'. Skip Proj and CatchProj nodes.
  dom = dom->find_exact_control(dom);
  if (dom == NULL || dom->is_top())
    return false; // Conservative answer for dead code

  if (dom == sub) {
    // For the case when, for example, 'sub' is Initialize and the original
    // 'dom' is a Proj node of the 'sub'.
    return false;
  }

  if (dom->is_Con() || dom->is_Start() || dom->is_Root() || dom == sub)
    return true;

  // 'dom' dominates 'sub' if its control edge and the control edges
  // of all its inputs dominate or are equal to sub's control edge.

  // Currently 'sub' is either an Allocate, Initialize or Start node,
  // or a Region for the check in LoadNode::Ideal();
  // 'sub' should have sub->in(0) != NULL.
  assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
         sub->is_Region() || sub->is_Call(), "expecting only these nodes");

  // Get control edge of 'sub'.
  Node* orig_sub = sub;
  sub = sub->find_exact_control(sub->in(0));
  if (sub == NULL || sub->is_top())
    return false; // Conservative answer for dead code

  assert(sub->is_CFG(), "expecting control");

  if (sub == dom)
    return true;

  if (sub->is_Start() || sub->is_Root())
    return false;

  {
    // Check all control edges of 'dom'.
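    // Worklist walk over the transitive control inputs of 'dom': every
    // control edge pulled from the list must itself dominate 'sub' (or be
    // a constant/Start/Root), otherwise the answer is a conservative false.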

    ResourceMark rm;
    Arena* arena = Thread::current()->resource_area();
    Node_List nlist(arena);
    Unique_Node_List dom_list(arena);

    dom_list.push(dom);
    bool only_dominating_controls = false;

    for (uint next = 0; next < dom_list.size(); next++) {
      Node* n = dom_list.at(next);
      if (n == orig_sub)
        return false; // One of dom's inputs dominated by sub.
      if (!n->is_CFG() && n->pinned()) {
        // Check only own control edge for pinned non-control nodes.
        n = n->find_exact_control(n->in(0));
        if (n == NULL || n->is_top())
          return false; // Conservative answer for dead code
        assert(n->is_CFG(), "expecting control");
        dom_list.push(n);
      } else if (n->is_Con() || n->is_Start() || n->is_Root()) {
        only_dominating_controls = true;
      } else if (n->is_CFG()) {
        if (n->dominates(sub, nlist))
          only_dominating_controls = true;
        else
          return false;
      } else {
        // First, own control edge.
        Node* m = n->find_exact_control(n->in(0));
        if (m != NULL) {
          if (m->is_top())
            return false; // Conservative answer for dead code
          dom_list.push(m);
        }
        // Now, the rest of edges.
        uint cnt = n->req();
        for (uint i = 1; i < cnt; i++) {
          m = n->find_exact_control(n->in(i));
          if (m == NULL || m->is_top())
            continue;
          dom_list.push(m);
        }
      }
    }
    return only_dominating_controls;
  }
}

//---------------------detect_ptr_independence---------------------------------
// Used by MemNode::find_previous_store to prove that two base
// pointers are never equal.
// The pointers are accompanied by their associated allocations,
// if any, which have been previously discovered by the caller.
bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase) {
  // Attempt to prove that these two pointers cannot be aliased.
  // They may both manifestly be allocations, and they should differ.
  // Or, if they are not both allocations, they can be distinct constants.
  // Otherwise, one is an allocation and the other a pre-existing value.
  if (a1 == NULL && a2 == NULL) {         // neither an allocation
    return (p1 != p2) && p1->is_Con() && p2->is_Con();
  } else if (a1 != NULL && a2 != NULL) {  // both allocations
    return (a1 != a2);
  } else if (a1 != NULL) {                // one allocation a1
    // (Note: p2->is_Con implies p2->in(0)->is_Root, which dominates.)
    return all_controls_dominate(p2, a1);
  } else { //(a2 != NULL)                 // one allocation a2
    return all_controls_dominate(p1, a2);
  }
  return false;
}


// Find an arraycopy that must have set (can_see_stored_value=true) or
// could have set (can_see_stored_value=false) the value for this load
Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
514 |
if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Op_MemBarStoreStore || |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
515 |
mem->in(0)->Opcode() == Op_MemBarCPUOrder)) { |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
516 |
Node* mb = mem->in(0); |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
517 |
if (mb->in(0) != NULL && mb->in(0)->is_Proj() && |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
518 |
mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) { |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
519 |
ArrayCopyNode* ac = mb->in(0)->in(0)->as_ArrayCopy(); |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
520 |
if (ac->is_clonebasic()) { |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
521 |
intptr_t offset; |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
522 |
AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase, offset); |
50913
9816d7cc655e
8205940: LoadNode::find_previous_arraycopy fails with "broken allocation" assert
thartmann
parents:
50525
diff
changeset
|
523 |
if (alloc != NULL && alloc == ld_alloc) { |
30629
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
524 |
return ac; |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
525 |
} |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
526 |
} |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
527 |
} |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
528 |
} else if (mem->is_Proj() && mem->in(0) != NULL && mem->in(0)->is_ArrayCopy()) { |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
529 |
ArrayCopyNode* ac = mem->in(0)->as_ArrayCopy(); |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
530 |
|
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
531 |
if (ac->is_arraycopy_validated() || |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
532 |
ac->is_copyof_validated() || |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
533 |
ac->is_copyofrange_validated()) { |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
534 |
Node* ld_addp = in(MemNode::Address); |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
535 |
if (ld_addp->is_AddP()) { |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
536 |
Node* ld_base = ld_addp->in(AddPNode::Address); |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
537 |
Node* ld_offs = ld_addp->in(AddPNode::Offset); |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
538 |
|
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
539 |
Node* dest = ac->in(ArrayCopyNode::Dest); |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
540 |
|
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
541 |
if (dest == ld_base) { |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
542 |
const TypeX *ld_offs_t = phase->type(ld_offs)->isa_intptr_t(); |
32370
38b7b5772b4f
8130847: Cloned object's fields observed as null after C2 escape analysis
roland
parents:
31231
diff
changeset
|
543 |
if (ac->modifies(ld_offs_t->_lo, ld_offs_t->_hi, phase, can_see_stored_value)) { |
38b7b5772b4f
8130847: Cloned object's fields observed as null after C2 escape analysis
roland
parents:
31231
diff
changeset
|
544 |
return ac; |
38b7b5772b4f
8130847: Cloned object's fields observed as null after C2 escape analysis
roland
parents:
31231
diff
changeset
|
545 |
} |
38b7b5772b4f
8130847: Cloned object's fields observed as null after C2 escape analysis
roland
parents:
31231
diff
changeset
|
546 |
if (!can_see_stored_value) { |
38b7b5772b4f
8130847: Cloned object's fields observed as null after C2 escape analysis
roland
parents:
31231
diff
changeset
|
547 |
mem = ac->in(TypeFunc::Memory); |
30629
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
548 |
} |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
549 |
} |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
550 |
} |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
551 |
} |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
552 |
} |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
553 |
return NULL; |
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
554 |
} |
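
// Graph shape recognized by the clone case above (added sketch, not in
// the original source):
//
//   ArrayCopy (clonebasic)
//        |
//      Proj (control)
//        |
//   MemBarStoreStore or MemBarCPUOrder
//        |
//      Proj (memory)   <-- the 'mem' input seen by the load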

// The logic for reordering loads and stores uses four steps:
// (a) Walk carefully past stores and initializations which we
//     can prove are independent of this load.
// (b) Observe that the next memory state makes an exact match
//     with self (load or store), and locate the relevant store.
// (c) Ensure that, if we were to wire self directly to the store,
//     the optimizer would fold it up somehow.
// (d) Do the rewiring, and return, depending on some other part of
//     the optimizer to fold up the load.
// This routine handles steps (a) and (b).  Steps (c) and (d) are
// specific to loads and stores, so they are handled by the callers.
// (Currently, only LoadNode::Ideal has steps (c), (d).  More later.)
//
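// Illustrative sketch of steps (a) and (b) (added commentary, not in the
// original source), for Java code such as:
//   a[0] = x;      // StoreI, e.g. at offset 16
//   a[1] = y;      // StoreI, e.g. at offset 20
//   int t = a[0];  // LoadI at offset 16
// The load walks past the provably independent store to a[1] (step (a))
// and stops at the store of x, whose base and offset match exactly
// (step (b)); folding the load to x is left to the caller.
//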
Node* MemNode::find_previous_store(PhaseTransform* phase) {
  Node* ctrl   = in(MemNode::Control);
  Node* adr    = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot)
    return NULL;            // cannot unalias unless there are precise offsets

  const bool adr_maybe_raw = check_if_adr_maybe_raw(adr);
  const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr();

  intptr_t size_in_bytes = memory_size();

  Node* mem = in(MemNode::Memory);   // start searching here...

  int cnt = 50;             // Cycle limiter
  for (;;) {                // While we can dance past unrelated stores...
    if (--cnt < 0)  break;  // Caught in cycle or a complicated dance?

    Node* prev = mem;
    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
      if (st_base == NULL)
        break;              // inscrutable pointer

      // For raw accesses it's not enough to prove that constant offsets don't intersect.
      // We need the bases to be equal in order for the offset check to make sense.
      if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
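          // (Added illustrative note, not in the original source: an 8-byte
          //  StoreL at offset 16 also covers bytes 16..23, so a 4-byte LoadI
          //  at offset 20 overlaps it even though the offsets differ; the
          //  size-aware bounds above catch exactly this.)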
          mem = mem->in(MemNode::Memory);
          continue;           // (a) advance through independent store memory
        }
      }
      if (st_base != base &&
          detect_ptr_independence(base, alloc,
                                  st_base,
                                  AllocateNode::Ideal_allocation(st_base, phase),
                                  phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue;           // (a) advance through independent store memory
      }

      // (b) At this point, if the bases or offsets do not agree, we lose,
      // since we have not managed to prove 'this' and 'mem' independent.
      if (st_base == base && st_offset == offset) {
        return mem;         // let caller handle steps (c), (d)
      }

    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode*  st_alloc = st_init->allocation();
      if (st_alloc == NULL)
        break;              // something degenerated
      bool known_identical = false;
      bool known_independent = false;
      if (alloc == st_alloc)
        known_identical = true;
      else if (alloc != NULL)
        known_independent = true;
      else if (all_controls_dominate(this, st_alloc))
        known_independent = true;

      if (known_independent) {
        // The bases are provably independent: Either they are
        // manifestly distinct allocations, or else the control
        // of this load dominates the store's allocation.
        int alias_idx = phase->C->get_alias_index(adr_type());
        if (alias_idx == Compile::AliasIdxRaw) {
          mem = st_alloc->in(TypeFunc::Memory);
        } else {
          mem = st_init->memory(alias_idx);
        }
        continue;           // (a) advance through independent store memory
      }

      // (b) at this point, if we are not looking at a store initializing
      // the same allocation we are loading from, we lose.
      if (known_identical) {
        // From caller, can_see_stored_value will consult find_captured_store.
        return mem;         // let caller handle steps (c), (d)
      }

    } else if (find_previous_arraycopy(phase, alloc, mem, false) != NULL) {
      if (prev != mem) {
        // Found an arraycopy but it doesn't affect that load
        continue;
      }
      // Found an arraycopy that may affect that load
      return mem;
    } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
      // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
      if (mem->is_Proj() && mem->in(0)->is_Call()) {
        // ArrayCopyNodes processed here as well.
        CallNode *call = mem->in(0)->as_Call();
        if (!call->may_modify(addr_t, phase)) {
          mem = call->in(TypeFunc::Memory);
          continue;         // (a) advance through independent call memory
        }
      } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
        ArrayCopyNode* ac = NULL;
        if (ArrayCopyNode::may_modify(addr_t, mem->in(0)->as_MemBar(), phase, ac)) {
          break;
        }
        mem = mem->in(0)->in(TypeFunc::Memory);
        continue;           // (a) advance through independent MemBar memory
      } else if (mem->is_ClearArray()) {
        if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
          // (the call updated 'mem' value)
          continue;         // (a) advance through independent allocation memory
        } else {
          // Can not bypass initialization of the instance
          // we are looking for.
          return mem;
        }
      } else if (mem->is_MergeMem()) {
        int alias_idx = phase->C->get_alias_index(adr_type());
        mem = mem->as_MergeMem()->memory_at(alias_idx);
        continue;           // (a) advance through independent MergeMem memory
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return NULL;              // bail out
}

//----------------------calculate_adr_type-------------------------------------
// Helper function. Notices when the given type of address hits top or bottom.
// Also, asserts a cross-check of the type against the expected address type.
const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) {
  if (t == Type::TOP)  return NULL; // does not touch memory any more?
#ifdef PRODUCT
  cross_check = NULL;
#else
  if (!VerifyAliases || VMError::is_error_reported() || Node::in_dump())  cross_check = NULL;
#endif
  const TypePtr* tp = t->isa_ptr();
  if (tp == NULL) {
    assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide");
    return TypePtr::BOTTOM;           // touches lots of memory
  } else {
#ifdef ASSERT
    // %%%% [phh] We don't check the alias index if cross_check is
    //            TypeRawPtr::BOTTOM.  Needs to be investigated.
    if (cross_check != NULL &&
        cross_check != TypePtr::BOTTOM &&
        cross_check != TypeRawPtr::BOTTOM) {
      // Recheck the alias index, to see if it has changed (due to a bug).
      Compile* C = Compile::current();
      assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
             "must stay in the original alias category");
      // The type of the address must be contained in the adr_type,
      // disregarding "null"-ness.
      // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
      const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
      assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
             "real address must not escape from expected memory type");
    }
#endif
    return tp;
  }
}

//=============================================================================
// Should LoadNode::Ideal() attempt to remove control edges?
bool LoadNode::can_remove_control() const {
  return true;
}
uint LoadNode::size_of() const { return sizeof(*this); }
uint LoadNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((LoadNode&)n)._type ); }
const Type *LoadNode::bottom_type() const { return _type; }
uint LoadNode::ideal_reg() const {
  return _type->ideal_reg();
}

#ifndef PRODUCT
void LoadNode::dump_spec(outputStream *st) const {
  MemNode::dump_spec(st);
  if( !Verbose && !WizardMode ) {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }
  if (!depends_only_on_test()) {
    st->print(" (does not depend only on test)");
  }
}
#endif

#ifdef ASSERT
//----------------------------is_immutable_value-------------------------------
// Helper function to allow a raw load without control edge for some cases
bool LoadNode::is_immutable_value(Node* adr) {
  return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
          adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
          (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
           in_bytes(JavaThread::osthread_offset())));
}
#endif

//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo,
                     ControlDependency control_dependency, bool unaligned, bool mismatched) {
  Compile* C = gvn.C;

  // sanity check the alias category against the created node type
  assert(!(adr_type->isa_oopptr() &&
           adr_type->offset() == oopDesc::klass_offset_in_bytes()),
         "use LoadKlassNode instead");
  assert(!(adr_type->isa_aryptr() &&
           adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
         "use LoadRangeNode instead");
  // Check control edge of raw loads
  assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
          // oop will be recorded in oop map if load crosses safepoint
          rt->isa_oopptr() || is_immutable_value(adr),
          "raw memory operations should have control edge");
  LoadNode* load = NULL;
  switch (bt) {
  case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
  case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
  case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
  case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
    } else
#endif
    {
      assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
      load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
    }
    break;
  default:
    ShouldNotReachHere();
    break;
  }
  assert(load != NULL, "LoadNode should have been created");
  if (unaligned) {
    load->set_unaligned_access();
  }
  if (mismatched) {
    load->set_mismatched_access();
  }
  if (load->Opcode() == Op_LoadN) {
    Node* ld = gvn.transform(load);
    return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
  }

  return load;
}
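
// Usage sketch (added commentary, not in the original source): a plain
// int-field load might be created roughly as
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
//                             TypeInt::INT, T_INT, MemNode::unordered);
// assuming the remaining parameters keep their defaults. Note that for
// T_OBJECT under compressed oops the factory returns a DecodeN wrapped
// around the transformed LoadN rather than a LoadP.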

LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
                                  ControlDependency control_dependency, bool unaligned, bool mismatched) {
  bool require_atomic = true;
  LoadLNode* load = new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
  if (unaligned) {
    load->set_unaligned_access();
  }
  if (mismatched) {
    load->set_mismatched_access();
  }
  return load;
}

LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
                                  ControlDependency control_dependency, bool unaligned, bool mismatched) {
  bool require_atomic = true;
  LoadDNode* load = new LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
  if (unaligned) {
    load->set_unaligned_access();
  }
  if (mismatched) {
    load->set_mismatched_access();
  }
  return load;
}
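
// Note (added commentary, not in the original source): these *_atomic
// factories exist because volatile long and double accesses must be
// single-copy atomic even on 32-bit platforms, where an ordinary
// two-word load could otherwise be split; require_atomic selects node
// variants that guarantee this.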


//------------------------------hash-------------------------------------------
uint LoadNode::hash() const {
  // unroll addition of interesting fields
  return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
}

static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
    bool is_stable_ary = FoldStableValues &&
                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
                         tp->isa_aryptr()->is_stable();

    return (eliminate_boxing && non_volatile) || is_stable_ary;
  }

  return false;
}

// Is the value loaded previously stored by an arraycopy? If so return
// a load node that reads from the source array so we may be able to
// optimize out the ArrayCopy node later.
Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
#if INCLUDE_ZGC
  if (UseZGC) {
    if (bottom_type()->make_oopptr() != NULL) {
      return NULL;
    }
  }
#endif

  Node* ld_adr = in(MemNode::Address);
  intptr_t ld_off = 0;
  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
  Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
  if (ac != NULL) {
    assert(ac->is_ArrayCopy(), "what kind of node can this be?");

    Node* mem = ac->in(TypeFunc::Memory);
    Node* ctl = ac->in(0);
    Node* src = ac->in(ArrayCopyNode::Src);

    if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
      return NULL;
    }

    LoadNode* ld = clone()->as_Load();
    Node* addp = in(MemNode::Address)->clone();
    if (ac->as_ArrayCopy()->is_clonebasic()) {
      assert(ld_alloc != NULL, "need an alloc");
      assert(addp->is_AddP(), "address must be addp");
      assert(ac->in(ArrayCopyNode::Dest)->is_AddP(), "dest must be an address");
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)->in(AddPNode::Base)), "strange pattern");
      assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)->in(AddPNode::Address)), "strange pattern");
      addp->set_req(AddPNode::Base, src->in(AddPNode::Base));
      addp->set_req(AddPNode::Address, src->in(AddPNode::Address));
    } else {
      assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
             ac->as_ArrayCopy()->is_copyof_validated() ||
             ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
      assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
      addp->set_req(AddPNode::Base, src);
      addp->set_req(AddPNode::Address, src);

      const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
      BasicType ary_elem  = ary_t->klass()->as_array_klass()->element_type()->basic_type();
      uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
      uint shift  = exact_log2(type2aelembytes(ary_elem));

      Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
      diff = phase->transform(new ConvI2LNode(diff));
#endif
      diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));

      Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
      addp->set_req(AddPNode::Offset, offset);
    }
    addp = phase->transform(addp);
#ifdef ASSERT
    const TypePtr* adr_type = phase->type(addp)->is_ptr();
    ld->_adr_type = adr_type;
#endif
    ld->set_req(MemNode::Address, addp);
    ld->set_req(0, ctl);
    ld->set_req(MemNode::Memory, mem);
    // load depends on the tests that validate the arraycopy
    ld->_control_dependency = Pinned;
    return ld;
  }
  return NULL;
}
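
// Worked example (added commentary, not in the original source): for a
// validated Arrays.copyOf-style copy, the cloned load is rebased from
// dest onto src by adjusting its offset:
//   new_offset = old_offset + ((SrcPos - DestPos) << log2(elem_size))
// e.g. for an int[] copied from srcPos=2 to destPos=0, the offset grows
// by (2 - 0) << 2 = 8 bytes, so the load reads the matching source slot.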


//---------------------------can_see_stored_value------------------------------
// This routine exists to make sure this set of tests is done the same
// everywhere.  We need to make a coordinated change: first LoadNode::Ideal
// will change the graph shape in a way which makes memory alive twice at the
// same time (uses the Oracle model of aliasing), then some
// LoadXNode::Identity will fold things back to the equivalence-class model
// of aliasing.
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
  Node* ld_adr = in(MemNode::Address);
  intptr_t ld_off = 0;
  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
  const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
  Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
  // This is more general than load from boxing objects.
  if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
    uint alias_idx = atp->index();
    bool final = !atp->is_rewritable();
    Node* result = NULL;
    Node* current = st;
    // Skip through chains of MemBarNodes checking the MergeMems for
    // new states for the slice of this load.  Stop once any other
    // kind of node is encountered.  Loads from final memory can skip
    // through any kind of MemBar but normal loads shouldn't skip
    // through MemBarAcquire since that could allow them to move out of
995 |
// a synchronized region. |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
996 |
while (current->is_Proj()) { |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
997 |
int opc = current->in(0)->Opcode(); |
22855
d637fd28a6c3
8028515: PPPC64 (part 113.2): opto: Introduce LoadFence/StoreFence.
goetz
parents:
22845
diff
changeset
|
998 |
if ((final && (opc == Op_MemBarAcquire || |
d637fd28a6c3
8028515: PPPC64 (part 113.2): opto: Introduce LoadFence/StoreFence.
goetz
parents:
22845
diff
changeset
|
999 |
opc == Op_MemBarAcquireLock || |
d637fd28a6c3
8028515: PPPC64 (part 113.2): opto: Introduce LoadFence/StoreFence.
goetz
parents:
22845
diff
changeset
|
1000 |
opc == Op_LoadFence)) || |
d637fd28a6c3
8028515: PPPC64 (part 113.2): opto: Introduce LoadFence/StoreFence.
goetz
parents:
22845
diff
changeset
|
1001 |
opc == Op_MemBarRelease || |
d637fd28a6c3
8028515: PPPC64 (part 113.2): opto: Introduce LoadFence/StoreFence.
goetz
parents:
22845
diff
changeset
|
1002 |
opc == Op_StoreFence || |
d637fd28a6c3
8028515: PPPC64 (part 113.2): opto: Introduce LoadFence/StoreFence.
goetz
parents:
22845
diff
changeset
|
1003 |
opc == Op_MemBarReleaseLock || |
30629
b6e5ad2f18d5
8076188: Optimize arraycopy out for non escaping destination
roland
parents:
30300
diff
changeset
|
1004 |
opc == Op_MemBarStoreStore || |
22855
d637fd28a6c3
8028515: PPPC64 (part 113.2): opto: Introduce LoadFence/StoreFence.
goetz
parents:
22845
diff
changeset
|
1005 |
opc == Op_MemBarCPUOrder) { |
190
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1006 |
Node* mem = current->in(0)->in(TypeFunc::Memory); |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1007 |
if (mem->is_MergeMem()) { |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1008 |
MergeMemNode* merge = mem->as_MergeMem(); |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1009 |
Node* new_st = merge->memory_at(alias_idx); |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1010 |
if (new_st == merge->base_memory()) { |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1011 |
// Keep searching |
17383 | 1012 |
current = new_st; |
190
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1013 |
continue; |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1014 |
} |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1015 |
// Save the new memory state for the slice and fall through |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1016 |
// to exit. |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1017 |
result = new_st; |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1018 |
} |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1019 |
} |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1020 |
break; |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1021 |
} |
e9a0a9dcd4f6
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
1
diff
changeset
|
1022 |
if (result != NULL) { |
e9a0a9dcd4f6
      st = result;
    }
  }

  // Loop around twice in the case Load -> Initialize -> Store.
  // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
  for (int trip = 0; trip <= 1; trip++) {

    if (st->is_Store()) {
      Node* st_adr = st->in(MemNode::Address);
      if (!phase->eqv(st_adr, ld_adr)) {
        // Try harder before giving up...  Match raw and non-raw pointers.
        intptr_t st_off = 0;
        AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
        if (alloc == NULL)       return NULL;
        if (alloc != ld_alloc)   return NULL;
        if (ld_off != st_off)    return NULL;
        // At this point we have proven something like this setup:
        //   A = Allocate(...)
        //   L = LoadQ(,  AddP(CastPP(,  A.Parm),, #Off))
        //   S = StoreQ(, AddP(,         A.Parm , #Off), V)
        // (Actually, we haven't yet proven the Q's are the same.)
        // In other words, we are loading from a casted version of
        // the same pointer-and-offset that we stored to.
        // Thus, we are able to replace L by V.
      }
      // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
      if (store_Opcode() != st->Opcode())
        return NULL;
      return st->in(MemNode::ValueIn);
    }

    // A load from a freshly-created object always returns zero.
    // (This can happen after LoadNode::Ideal resets the load's memory input
    // to find_captured_store, which returned InitializeNode::zero_memory.)
    if (st->is_Proj() && st->in(0)->is_Allocate() &&
        (st->in(0) == ld_alloc) &&
        (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
      // return a zero value for the load's basic type
      // (This is one of the few places where a generic PhaseTransform
      // can create new nodes.  Think of it as lazily manifesting
      // virtually pre-existing constants.)
      return phase->zerocon(memory_type());
    }

    // A load from an initialization barrier can match a captured store.
    if (st->is_Proj() && st->in(0)->is_Initialize()) {
      InitializeNode* init = st->in(0)->as_Initialize();
      AllocateNode* alloc = init->allocation();
      if ((alloc != NULL) && (alloc == ld_alloc)) {
        // examine a captured store value
        st = init->find_captured_store(ld_off, memory_size(), phase);
        if (st != NULL) {
          continue;             // take one more trip around
        }
      }
    }

    // A load of a boxed value from the result of a valueOf() call is the
    // call's input parameter.
    if (this->is_Load() && ld_adr->is_AddP() &&
        (tp != NULL) && tp->is_ptr_to_boxed_value()) {
      intptr_t ignore = 0;
      Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      base = bs->step_over_gc_barrier(base);
      if (base != NULL && base->is_Proj() &&
          base->as_Proj()->_con == TypeFunc::Parms &&
          base->in(0)->is_CallStaticJava() &&
          base->in(0)->as_CallStaticJava()->is_boxing_method()) {
        return base->in(0)->in(TypeFunc::Parms);
      }
    }

    break;
  }

  return NULL;
}
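
// Illustrative sketch of the boxed-value case above (assumed Java source,
// not part of this file): for
//   Integer i = Integer.valueOf(x);
//   int y = i.intValue();
// the unboxing load from i matches the boxing call, so can_see_stored_value
// answers with x directly and the box allocation may later become dead.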

//----------------------is_instance_field_load_with_local_phi------------------
bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
  if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
      in(Address)->is_AddP() ) {
    const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
    // Only instances and boxed values.
    if( t_oop != NULL &&
        (t_oop->is_ptr_to_boxed_value() ||
         t_oop->is_known_instance_field()) &&
        t_oop->offset() != Type::OffsetBot &&
        t_oop->offset() != Type::OffsetTop) {
      return true;
    }
  }
  return false;
}

//------------------------------Identity---------------------------------------
// Loads are identity if previous store is to same address
Node* LoadNode::Identity(PhaseGVN* phase) {
  // If the previous store-maker is the right kind of Store, and the store is
  // to the same address, then we are equal to the value stored.
  Node* mem = in(Memory);
  Node* value = can_see_stored_value(mem, phase);
  if( value ) {
    // byte, short & char stores truncate naturally.
    // A load has to load the truncated value which requires
    // some sort of masking operation and that requires an
    // Ideal call instead of an Identity call.
    if (memory_size() < BytesPerInt) {
      // If the input to the store does not fit with the load's result type,
      // it must be truncated via an Ideal call.
      if (!phase->type(value)->higher_equal(phase->type(this)))
        return this;
    }
    // (This works even when value is a Con, but LoadNode::Value
    // usually runs first, producing the singleton type of the Con.)
    return value;
  }

  // Search for an existing data phi which was generated before for the same
  // instance's field to avoid infinite generation of phis in a loop.
  Node *region = mem->in(0);
  if (is_instance_field_load_with_local_phi(region)) {
    const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
    int this_index  = phase->C->get_alias_index(addr_t);
    int this_offset = addr_t->offset();
    int this_iid    = addr_t->instance_id();
    if (!addr_t->is_known_instance() &&
         addr_t->is_ptr_to_boxed_value()) {
      // Use _idx of address base (could be Phi node) for boxed values.
      intptr_t   ignore = 0;
      Node*      base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
      if (base == NULL) {
        return this;
      }
      this_iid = base->_idx;
    }
    const Type* this_type = bottom_type();
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() && phi != mem &&
          phi->as_Phi()->is_same_inst_field(this_type, (int)mem->_idx, this_iid, this_index, this_offset)) {
        return phi;
      }
    }
  }

  return this;
}
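
// A small example of the truncation caveat in Identity above: after
//   StoreB(adr, v); LoadB(adr)
// the load must observe v truncated to a byte. If phase->type(v) is not
// already narrow enough, Identity returns 'this' and leaves the masking to
// the load's Ideal transform rather than answering with v directly.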
|
1172 |
||
36830
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1173 |
// Construct an equivalent unsigned load. |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1174 |
Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) { |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1175 |
BasicType bt = T_ILLEGAL; |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1176 |
const Type* rt = NULL; |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1177 |
switch (Opcode()) { |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1178 |
case Op_LoadUB: return this; |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1179 |
case Op_LoadUS: return this; |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1180 |
case Op_LoadB: bt = T_BOOLEAN; rt = TypeInt::UBYTE; break; |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1181 |
case Op_LoadS: bt = T_CHAR; rt = TypeInt::CHAR; break; |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1182 |
default: |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1183 |
assert(false, "no unsigned variant: %s", Name()); |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1184 |
return NULL; |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1185 |
} |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1186 |
return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), |
36840
29d9167ccc82
8152972: C2 crashes with SIGSEGV in LoadNode::make
vlivanov
parents:
36831
diff
changeset
|
1187 |
raw_adr_type(), rt, bt, _mo, _control_dependency, |
36830
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1188 |
is_unaligned_access(), is_mismatched_access()); |
ebc8b5e23f63
8152773: C2: LoadNode properties aren't preserved when converting between signed/unsigned variants
vlivanov
parents:
36554
diff
changeset
|
1189 |
} |

// Construct an equivalent signed load.
Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) {
  BasicType bt = T_ILLEGAL;
  const Type* rt = NULL;
  switch (Opcode()) {
    case Op_LoadUB: bt = T_BYTE;  rt = TypeInt::BYTE;  break;
    case Op_LoadUS: bt = T_SHORT; rt = TypeInt::SHORT; break;
    case Op_LoadB: // fall through
    case Op_LoadS: // fall through
    case Op_LoadI: // fall through
    case Op_LoadL: return this;
    default:
      assert(false, "no signed variant: %s", Name());
      return NULL;
  }
  return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
                        raw_adr_type(), rt, bt, _mo, _control_dependency,
                        is_unaligned_access(), is_mismatched_access());
}
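
// Both conversions above reuse LoadNode::make, so the new node keeps the same
// control, memory, address, memory ordering, control dependency and
// (mis)alignment flags; only the basic type and result type change.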

// We're loading from an object which has autobox behaviour.
// If this object is result of a valueOf call we'll have a phi
// merging a newly allocated object and a load from the cache.
// We want to replace this load with the original incoming
// argument to the valueOf call.
Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
  assert(phase->C->eliminate_boxing(), "sanity");
  intptr_t ignore = 0;
  Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
  if ((base == NULL) || base->is_Phi()) {
    // Push the loads from the phi that comes from valueOf up
    // through it to allow elimination of the loads and the recovery
    // of the original value. It is done in split_through_phi().
    return NULL;
  } else if (base->is_Load() ||
             (base->is_DecodeN() && base->in(1)->is_Load())) {
    // Eliminate the load of boxed value for integer types from the cache
    // array by deriving the value from the index into the array.
    // Capture the offset of the load and then reverse the computation.

    // Get LoadN node which loads a boxing object from 'cache' array.
    if (base->is_DecodeN()) {
      base = base->in(1);
    }
    if (!base->in(Address)->is_AddP()) {
      return NULL; // Complex address
    }
    AddPNode* address = base->in(Address)->as_AddP();
    Node* cache_base = address->in(AddPNode::Base);
    if ((cache_base != NULL) && cache_base->is_DecodeN()) {
      // Get ConP node which is static 'cache' field.
      cache_base = cache_base->in(1);
    }
    if ((cache_base != NULL) && cache_base->is_Con()) {
      const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
      if ((base_type != NULL) && base_type->is_autobox_cache()) {
        Node* elements[4];
        int shift = exact_log2(type2aelembytes(T_OBJECT));
        int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
        if (count > 0 && elements[0]->is_Con() &&
            (count == 1 ||
             (count == 2 && elements[1]->Opcode() == Op_LShiftX &&
                            elements[1]->in(2) == phase->intcon(shift)))) {
          ciObjArray* array = base_type->const_oop()->as_obj_array();
          // Fetch the box object cache[0] at the base of the array and get its value
          ciInstance* box = array->obj_at(0)->as_instance();
          ciInstanceKlass* ik = box->klass()->as_instance_klass();
          assert(ik->is_box_klass(), "sanity");
          assert(ik->nof_nonstatic_fields() == 1, "change following code");
          if (ik->nof_nonstatic_fields() == 1) {
            // This should be true; nonstatic_field_at requires calling
            // nof_nonstatic_fields, so check it anyway.
            ciConstant c = box->field_value(ik->nonstatic_field_at(0));
            BasicType bt = c.basic_type();
            // Only integer types have boxing cache.
            assert(bt == T_BOOLEAN || bt == T_CHAR  ||
                   bt == T_BYTE    || bt == T_SHORT ||
                   bt == T_INT     || bt == T_LONG, "wrong type = %s", type2name(bt));
            jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
            if (cache_low != (int)cache_low) {
              return NULL; // should not happen since cache is array indexed by value
            }
            jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
            if (offset != (int)offset) {
              return NULL; // should not happen since cache is array indexed by value
            }
            // Add up all the offsets making up the address of the load
            Node* result = elements[0];
            for (int i = 1; i < count; i++) {
              result = phase->transform(new AddXNode(result, elements[i]));
            }
            // Remove the constant offset from the address and then
            result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
            // remove the scaling of the offset to recover the original index.
            if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
              // Peel the shift off directly but wrap it in a dummy node
              // since Ideal can't return existing nodes
              result = new RShiftXNode(result->in(1), phase->intcon(0));
            } else if (result->is_Add() && result->in(2)->is_Con() &&
                       result->in(1)->Opcode() == Op_LShiftX &&
                       result->in(1)->in(2) == phase->intcon(shift)) {
              // We can't do the general optimization ((X<<Z) + Y) >> Z ==> X + (Y>>Z)
              // but for boxing cache access we know that X<<Z will not overflow
              // (there is a range check) so we do this optimization by hand here.
              Node* add_con = new RShiftXNode(result->in(2), phase->intcon(shift));
              result = new AddXNode(result->in(1)->in(1), phase->transform(add_con));
            } else {
              result = new RShiftXNode(result, phase->intcon(shift));
            }
#ifdef _LP64
            if (bt != T_LONG) {
              result = new ConvL2INode(phase->transform(result));
            }
#else
            if (bt == T_LONG) {
              result = new ConvI2LNode(phase->transform(result));
            }
#endif
            // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
            // Need to preserve unboxing load type if it is unsigned.
            switch(this->Opcode()) {
              case Op_LoadUB:
                result = new AndINode(phase->transform(result), phase->intcon(0xFF));
                break;
              case Op_LoadUS:
                result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
                break;
            }
            return result;
          }
        }
      }
    }
  }
  return NULL;
}
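
// A worked sketch of the index recovery above, assuming the usual
// Integer.IntegerCache shape where cache_low == -128: the cache load's
// address decomposes as base_offset + ((v - cache_low) << shift), so
//   v == (addr_offset - (base_offset - (cache_low << shift))) >> shift
// and the AddX/RShiftX chain built above computes exactly that, recovering
// the unboxed value without touching the cache array at runtime.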

static bool stable_phi(PhiNode* phi, PhaseGVN *phase) {
  Node* region = phi->in(0);
  if (region == NULL) {
    return false; // Wait stable graph
  }
  uint cnt = phi->req();
  for (uint i = 1; i < cnt; i++) {
    Node* rc = region->in(i);
    if (rc == NULL || phase->type(rc) == Type::TOP)
      return false; // Wait stable graph
    Node* in = phi->in(i);
    if (in == NULL || phase->type(in) == Type::TOP)
      return false; // Wait stable graph
  }
  return true;
}
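
// Rough shape of the transformation performed below: a load whose memory
// (or address base) input is a Phi is pushed through it, e.g.
//   LoadI(mem: Phi(m1, m2), adr)  ==>  Phi(LoadI(m1, adr), LoadI(m2, adr))
// so that each cloned load can fold against the store on its own path.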
//------------------------------split_through_phi------------------------------
// Split instance or boxed field load through Phi.
Node *LoadNode::split_through_phi(PhaseGVN *phase) {
  Node* mem     = in(Memory);
  Node* address = in(Address);
  const TypeOopPtr *t_oop = phase->type(address)->isa_oopptr();

  assert((t_oop != NULL) &&
         (t_oop->is_known_instance_field() ||
          t_oop->is_ptr_to_boxed_value()), "invalid conditions");

  Compile* C = phase->C;
  intptr_t ignore = 0;
  Node*    base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
  bool base_is_phi = (base != NULL) && base->is_Phi();
  bool load_boxed_values = t_oop->is_ptr_to_boxed_value() && C->aggressive_unboxing() &&
                           (base != NULL) && (base == address->in(AddPNode::Base)) &&
                           phase->type(base)->higher_equal(TypePtr::NOTNULL);

  if (!((mem->is_Phi() || base_is_phi) &&
        (load_boxed_values || t_oop->is_known_instance_field()))) {
    return NULL; // memory is not Phi
  }

  if (mem->is_Phi()) {
    if (!stable_phi(mem->as_Phi(), phase)) {
      return NULL; // Wait stable graph
    }
    uint cnt = mem->req();
    // Check for loop invariant memory.
    if (cnt == 3) {
      for (uint i = 1; i < cnt; i++) {
        Node* in = mem->in(i);
        Node*  m = optimize_memory_chain(in, t_oop, this, phase);
        if (m == mem) {
          set_req(Memory, mem->in(cnt - i));
          return this; // made change
        }
      }
    }
  }
  if (base_is_phi) {
    if (!stable_phi(base->as_Phi(), phase)) {
      return NULL; // Wait stable graph
    }
    uint cnt = base->req();
    // Check for loop invariant memory.
    if (cnt == 3) {
      for (uint i = 1; i < cnt; i++) {
        if (base->in(i) == base) {
          return NULL; // Wait stable graph
        }
      }
    }
  }

  bool load_boxed_phi = load_boxed_values && base_is_phi && (base->in(0) == mem->in(0));

  // Split through Phi (see original code in loopopts.cpp).
  assert(C->have_alias_type(t_oop), "instance should have alias type");

  // Do nothing here if Identity will find a value
  // (to avoid infinite chain of value phis generation).
  if (!phase->eqv(this, phase->apply_identity(this)))
    return NULL;

  // Select Region to split through.
  Node* region;
  if (!base_is_phi) {
    assert(mem->is_Phi(), "sanity");
    region = mem->in(0);
    // Skip if the region dominates some control edge of the address.
    if (!MemNode::all_controls_dominate(address, region))
      return NULL;
  } else if (!mem->is_Phi()) {
    assert(base_is_phi, "sanity");
    region = base->in(0);
    // Skip if the region dominates some control edge of the memory.
    if (!MemNode::all_controls_dominate(mem, region))
      return NULL;
  } else if (base->in(0) != mem->in(0)) {
    assert(base_is_phi && mem->is_Phi(), "sanity");
    if (MemNode::all_controls_dominate(mem, base->in(0))) {
      region = base->in(0);
    } else if (MemNode::all_controls_dominate(address, mem->in(0))) {
      region = mem->in(0);
    } else {
      return NULL; // complex graph
    }
  } else {
    assert(base->in(0) == mem->in(0), "sanity");
    region = mem->in(0);
  }

  const Type* this_type = this->bottom_type();
  int this_index  = C->get_alias_index(t_oop);
  int this_offset = t_oop->offset();
  int this_iid    = t_oop->instance_id();
  if (!t_oop->is_known_instance() && load_boxed_values) {
    // Use _idx of address base for boxed values.
    this_iid = base->_idx;
  }
  PhaseIterGVN* igvn = phase->is_IterGVN();
  Node* phi = new PhiNode(region, this_type, NULL, mem->_idx, this_iid, this_index, this_offset);
  for (uint i = 1; i < region->req(); i++) {
    Node* x;
    Node* the_clone = NULL;
    if (region->in(i) == C->top()) {
      x = C->top();      // Dead path?  Use a dead data op
    } else {
      x = this->clone();        // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (this->in(0) == region) {
        x->set_req(0, region->in(i));
      } else {
        x->set_req(0, NULL);
      }
      if (mem->is_Phi() && (mem->in(0) == region)) {
        x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone.
      }
      if (address->is_Phi() && address->in(0) == region) {
        x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone
      }
      if (base_is_phi && (base->in(0) == region)) {
        Node* base_x = base->in(i); // Clone address for loads from boxed objects.
        Node* adr_x = phase->transform(new AddPNode(base_x,base_x,address->in(AddPNode::Offset)));
        x->set_req(Address, adr_x);
      }
    }
    // Check for a 'win' on some paths
    const Type *t = x->Value(igvn);

    bool singleton = t->singleton();

    // See comments in PhaseIdealLoop::split_thru_phi().
    if (singleton && t == Type::TOP) {
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      x = igvn->makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (This tweaking with igvn only works because x is a new node.)
      igvn->set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node,
      // otherwise it will not be updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      Node *y = igvn->apply_identity(x);
      if (y != x) {
        x = y;
      } else {
        y = igvn->hash_find_insert(x);
        if (y) {
          x = y;
        } else {
          // Else x is a new node we are keeping
          // We do not need register_new_node_with_optimizer
          // because set_type has already been called.
          igvn->_worklist.push(x);
        }
      }
    }
    if (x != the_clone && the_clone != NULL) {
      igvn->remove_dead_node(the_clone);
    }
    phi->set_req(i, x);
  }
  // Record Phi
  igvn->register_new_node_with_optimizer(phi);
  return phi;
}

//------------------------------Ideal------------------------------------------
// If the load is from Field memory and the pointer is non-null, it might be possible to
// zero out the control input.
// If the offset is constant and the base is an object allocation,
// try to hook me up to the exact initializing store.
Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p)  return (p == NodeSentinel) ? NULL : p;

  Node* ctrl    = in(MemNode::Control);
  Node* address = in(MemNode::Address);
  bool progress = false;

  // Skip up past a SafePoint control.  Cannot do this for Stores because
  // pointer stores & cardmarks must stay on the same side of a SafePoint.
  if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
      phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
    ctrl = ctrl->in(0);
    set_req(MemNode::Control,ctrl);
    progress = true;
  }

  intptr_t ignore = 0;
  Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
  if (base != NULL
      && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
    // Check for useless control edge in some common special cases
    if (in(MemNode::Control) != NULL
        && can_remove_control()
        && phase->type(base)->higher_equal(TypePtr::NOTNULL)
        && all_controls_dominate(base, phase->C->start())) {
      // A method-invariant, non-null address (constant or 'this' argument).
      set_req(MemNode::Control, NULL);
      progress = true;
    }
  }

  Node* mem = in(MemNode::Memory);
  const TypePtr *addr_t = phase->type(address)->isa_ptr();

  if (can_reshape && (addr_t != NULL)) {
    // try to optimize our memory input
    Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
    if (opt_mem != mem) {
      set_req(MemNode::Memory, opt_mem);
      if (phase->type( opt_mem ) == Type::TOP) return NULL;
      return this;
    }
    const TypeOopPtr *t_oop = addr_t->isa_oopptr();
    if ((t_oop != NULL) &&
        (t_oop->is_known_instance_field() ||
         t_oop->is_ptr_to_boxed_value())) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      if (igvn != NULL && igvn->_worklist.member(opt_mem)) {
        // Delay this transformation until memory Phi is processed.
        phase->is_IterGVN()->_worklist.push(this);
        return NULL;
      }
      // Split instance field load through Phi.
      Node* result = split_through_phi(phase);
      if (result != NULL) return result;

      if (t_oop->is_ptr_to_boxed_value()) {
        Node* result = eliminate_autobox(phase);
        if (result != NULL) return result;
      }
    }
  }

  // Is there a dominating load that loads the same value? Leave
  // anything that is not a load of a field/array element (like
  // barriers etc.) alone
  if (in(0) != NULL && !adr_type()->isa_rawptr() && can_reshape) {
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      Node *use = mem->fast_out(i);
      if (use != this &&
          use->Opcode() == Opcode() &&
          use->in(0) != NULL &&
          use->in(0) != in(0) &&
          use->in(Address) == in(Address)) {
        Node* ctl = in(0);
        for (int i = 0; i < 10 && ctl != NULL; i++) {
          ctl = IfNode::up_one_dom(ctl);
          if (ctl == use->in(0)) {
            set_req(0, use->in(0));
            return this;
          }
        }
      }
    }
  }

  // Check for prior store with a different base or offset; make Load
  // independent.  Skip through any number of them.  Bail out if the stores
  // are in an endless dead cycle and report no progress.  This is a key
  // transform for Reflection.  However, if after skipping through the Stores
  // we can't then fold up against a prior store do NOT do the transform as
  // this amounts to using the 'Oracle' model of aliasing.  It leaves the same
  // array memory alive twice: once for the hoisted Load and again after the
  // bypassed Store.  This situation only works if EVERYBODY who does
  // anti-dependence work knows how to bypass.  I.e. we need all
  // anti-dependence checks to ask the same Oracle.  Right now, that Oracle is
  // the alias index stuff.  So instead, peek through Stores and IFF we can
  // fold up, do so.
  Node* prev_mem = find_previous_store(phase);
  if (prev_mem != NULL) {
    Node* value = can_see_arraycopy_value(prev_mem, phase);
    if (value != NULL) {
      return value;
    }
  }
  // Steps (a), (b):  Walk past independent stores to find an exact match.
  if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) {
    // (c) See if we can fold up on the spot, but don't fold up here.
    // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
    // just return a prior value, which is done by Identity calls.
    if (can_see_stored_value(prev_mem, phase)) {
      // Make ready for step (d):
      set_req(MemNode::Memory, prev_mem);
      return this;
    }
  }

  return progress ? this : NULL;
}
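
// Note on the dominating-load search in Ideal above: the inner loop follows
// at most 10 dominators via IfNode::up_one_dom, a small fixed bound that
// keeps this redundancy check cheap during IGVN.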

// Helper to recognize certain Klass fields which are invariant across
// some group of array types (e.g., int[] or all T[] where T < Object).
const Type*
LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
                                 ciKlass* klass) const {
  if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
    // The field is Klass::_modifier_flags.  Return its (constant) value.
    // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
    assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
    return TypeInt::make(klass->modifier_flags());
  }
  if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
    // The field is Klass::_access_flags.  Return its (constant) value.
    // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
    assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
    return TypeInt::make(klass->access_flags());
  }
  if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
    // The field is Klass::_layout_helper.  Return its constant value if known.
    assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
    return TypeInt::make(klass->layout_helper());
  }

  // No match.
  return NULL;
}
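
// For illustration: when the Klass* is a compile-time constant (e.g. the
// receiver is String.class), a Java expression like
// String.class.getModifiers() reduces here to
// TypeInt::make(klass->modifier_flags()), i.e. a constant.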
|
1674 |
||
1675 |
//------------------------------Value----------------------------------------- |
|
35551
36ef3841fb34
8146629: Make phase->is_IterGVN() accessible from Node::Identity and Node::Value
thartmann
parents:
35548
diff
changeset
|
1676 |
const Type* LoadNode::Value(PhaseGVN* phase) const { |
  // Either input is TOP ==> the result is TOP
  Node* mem = in(MemNode::Memory);
  const Type *t1 = phase->type(mem);
  if (t1 == Type::TOP)  return Type::TOP;
  Node* adr = in(MemNode::Address);
  const TypePtr* tp = phase->type(adr)->isa_ptr();
  if (tp == NULL || tp->empty())  return Type::TOP;
  int off = tp->offset();
  assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
  Compile* C = phase->C;

  // Try to guess loaded type from pointer type
  if (tp->isa_aryptr()) {
    const TypeAryPtr* ary = tp->is_aryptr();
    const Type* t = ary->elem();

    // Determine whether the reference is beyond the header or not, by comparing
    // the offset against the offset of the start of the array's data.
    // Different array types begin at slightly different offsets (12 vs. 16).
    // We choose T_BYTE as an example base type that is least restrictive
    // as to alignment, which will therefore produce the smallest
    // possible base offset.
    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);

    // Try to constant-fold a stable array element.
    if (FoldStableValues && !is_mismatched_access() && ary->is_stable()) {
      // Make sure the reference is not into the header and the offset is constant
      ciObject* aobj = ary->const_oop();
      if (aobj != NULL && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
        int stable_dimension = (ary->stable_dimension() > 0 ? ary->stable_dimension() - 1 : 0);
        const Type* con_type = Type::make_constant_from_array_element(aobj->as_array(), off,
                                                                      stable_dimension,
                                                                      memory_type(), is_unsigned());
        if (con_type != NULL) {
          return con_type;
        }
      }
    }
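
    // Illustrative sketch (not from the original source): for a stable array
    // such as
    //   @Stable final int[] CACHE = {7, 42};
    // a LoadI at the constant offset of CACHE[1] can fold to TypeInt::make(42).
    // Default (zero/null) elements never fold, since they may still be written.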

    // Don't do this for integer types. There is only potential profit if
    // the element type t is lower than _type; that is, for int types, if _type is
    // more restrictive than t.  This only happens here if one is short and the other
    // char (both 16 bits), and in those cases we've made an intentional decision
    // to use one kind of load over the other. See AndINode::Ideal and 4965907.
    // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
    //
    // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
    // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
    // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
    // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
    // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
    // In fact, that could have been the original type of p1, and p1 could have
    // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
    // expression (LShiftL quux 3) independently optimized to the constant 8.
    if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
        && (_type->isa_vect() == NULL)
        && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
      // t might actually be lower than _type, if _type is a unique
      // concrete subclass of abstract class t.
      if (off_beyond_header) {  // is the offset beyond the header?
        const Type* jt = t->join_speculative(_type);
        // In any case, do not allow the join, per se, to empty out the type.
        if (jt->empty() && !t->empty()) {
          // This can happen if an interface-typed array narrows to a class type.
          jt = _type;
        }
#ifdef ASSERT
        if (phase->C->eliminate_boxing() && adr->is_AddP()) {
          // The pointers in the autobox arrays are always non-null
          Node* base = adr->in(AddPNode::Base);
          if ((base != NULL) && base->is_DecodeN()) {
            // Get LoadN node which loads IntegerCache.cache field
            base = base->in(1);
          }
          if ((base != NULL) && base->is_Con()) {
            const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
            if ((base_type != NULL) && base_type->is_autobox_cache()) {
              // It could be narrow oop
              assert(jt->make_ptr()->ptr() == TypePtr::NotNull, "sanity");
            }
          }
        }
#endif
        return jt;
      }
    }
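
    // Illustrative example (not from the original source): for a LoadP whose
    // declared _type is java.lang.Object but whose address points into a
    // String[], t is String, so the join above narrows the result to String.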
} else if (tp->base() == Type::InstPtr) { |
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects
            tp->is_oopptr()->klass()->is_java_lang_Object() ||
            // unsafe field access may not have a constant offset
            C->has_unsafe_access(),
            "Field accesses must be precise" );
    // For oop loads, we expect the _type to be precise.

    // Optimize loads from constant fields.
    const TypeInstPtr* tinst = tp->is_instptr();
    ciObject* const_oop = tinst->const_oop();
    if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
      const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
      if (con_type != NULL) {
        return con_type;
      }
    }
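
    // Illustrative example (not from the original source): if const_oop is the
    // instance behind
    //   static final Point ORIGIN = new Point(1, 2);
    // a load of ORIGIN.x at a constant offset may fold to TypeInt::make(1),
    // subject to the folding rules inside Type::make_constant_from_field.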
} else if (tp->base() == Type::KlassPtr) { |
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects
            tp->is_klassptr()->klass()->is_java_lang_Object() ||
            // also allow array-loading from the primary supertype
            // array during subtype checks
            Opcode() == Op_LoadKlass,
            "Field accesses must be precise" );
    // For klass/static loads, we expect the _type to be precise
  } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
    /* With mirrors being an indirection in the Klass*
     * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
     * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
     *
     * So check the type and klass of the node before the LoadP.
     */
    Node* adr2 = adr->in(MemNode::Address);
    const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
    if (tkls != NULL && !StressReflectiveCode) {
      ciKlass* klass = tkls->klass();
      if (klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
        assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }
} |
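
  // Illustrative note (not from the original source): the RawPtr case above
  // matches reflective Java such as
  //   Class<?> c = Foo.class;
  // which C2 sees as LoadP(LoadP(Klass, mirror_offset), 0); once the Klass is
  // exact and loaded, the whole chain folds to the constant mirror oop.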

  const TypeKlassPtr *tkls = tp->isa_klassptr();
  if (tkls != NULL && !StressReflectiveCode) {
    ciKlass* klass = tkls->klass();
    if (klass->is_loaded() && tkls->klass_is_exact()) {
      // We are loading a field from a Klass metaobject whose identity
      // is known at compile time (the type is "exact" or "precise").
      // Check for fields we know are maintained as constants by the VM.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // The field is Klass::_super_check_offset.  Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
      // Compute index into primary_supers array
      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
      // Check for overflowing; use unsigned compare to handle the negative case.
      if( depth < ciKlass::primary_super_limit() ) {
        // The field is an element of Klass::_primary_supers.  Return its (constant) value.
        // (Folds up type checking code.)
        assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass *ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
      }
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != NULL)  return aift;
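
      // Illustrative example (not from the original source): for
      //   x instanceof Number
      // with an exact, loaded receiver klass, both the _super_check_offset
      // load and the matching _primary_supers element fold to constants here,
      // so the inlined subtype check can evaluate at compile time.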
} |

    // We can still check if we are loading from the primary_supers array at a
    // shallow enough depth.  Even though the klass is not exact, entries less
    // than or equal to its super depth are correct.
    if (klass->is_loaded() ) {
      ciType *inner = klass;
      while( inner->is_obj_array_klass() )
        inner = inner->as_obj_array_klass()->base_element_type();
      if( inner->is_instance_klass() &&
          !inner->as_instance_klass()->flags().is_interface() ) {
        // Compute index into primary_supers array
        juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
        // Check for overflowing; use unsigned compare to handle the negative case.
        if( depth < ciKlass::primary_super_limit() &&
            depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
          // The field is an element of Klass::_primary_supers.  Return its (constant) value.
          // (Folds up type checking code.)
          assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
          ciKlass *ss = klass->super_of_depth(depth);
          return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
        }
      }
    }

    // If the type is enough to determine that the thing is not an array,
    // we can give the layout_helper a positive interval type.
    // This will help short-circuit some reflective code.
    if (tkls->offset() == in_bytes(Klass::layout_helper_offset())
        && !klass->is_array_klass() // not directly typed as an array
        && !klass->is_interface()  // specifically not Serializable & Cloneable
        && !klass->is_java_lang_Object()   // not the supertype of all T[]
        ) {
      // Note:  When interfaces are reliable, we can narrow the interface
      // test to (klass != Serializable && klass != Cloneable).
      assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
      jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
      // The key property of this type is that it folds up tests
      // for array-ness, since it proves that the layout_helper is positive.
      // Thus, a generic value like the basic object layout helper works fine.
      return TypeInt::make(min_size, max_jint, Type::WidenMin);
    }
  }

  // If we are loading from a freshly-allocated object, produce a zero,
  // if the load is provably beyond the header of the object.
  // (Also allow a variable load from a fresh array to produce zero.)
  const TypeOopPtr *tinst = tp->isa_oopptr();
  bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
  bool is_boxed_value = (tinst != NULL) && tinst->is_ptr_to_boxed_value();
  if (ReduceFieldZeroing || is_instance || is_boxed_value) {
    Node* value = can_see_stored_value(mem,phase);
    if (value != NULL && value->is_Con()) {
      assert(value->bottom_type()->higher_equal(_type), "sanity");
      return value->bottom_type();
    }
  }
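
  // Illustrative example (not from the original source): with
  //   Point p = new Point();
  // a read of p.x before any store can see the initializing zero via
  // can_see_stored_value, so the load folds to the zero of its basic type.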

  if (is_instance) {
    // If we have an instance type and our memory input is the
    // program's initial memory state, there is no matching store,
    // so just return a zero of the appropriate type
    Node *mem = in(MemNode::Memory);
    if (mem->is_Parm() && mem->in(0)->is_Start()) {
      assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
      return Type::get_zero_type(_type->basic_type());
    }
  }
  return _type;
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not?  Match only the address.
uint LoadNode::match_edge(uint idx) const {
  return idx == MemNode::Address;
}

//--------------------------LoadBNode::Ideal--------------------------------------
//
//  If the previous store is to the same address as this load,
//  and the value stored was larger than a byte, replace this load
//  with the value stored truncated to a byte.  If no truncation is
//  needed, the replacement is done in LoadNode::Identity().
//
Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if( value && !phase->type(value)->higher_equal( _type ) ) {
    Node *result = phase->transform( new LShiftINode(value, phase->intcon(24)) );
    return new RShiftINode(result, phase->intcon(24));
  }
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadBNode::Value(PhaseGVN* phase) const { |
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make((con << 24) >> 24);
  }
  return LoadNode::Value(phase);
} |
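
// Illustrative example (not from the original source): if a StoreB received
// the constant 0x1FF, a subsequent LoadB of that address folds here to
// (0x1FF << 24) >> 24 = -1, i.e. the sign-extended low byte.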

//--------------------------LoadUBNode::Ideal-------------------------------------
//
//  If the previous store is to the same address as this load,
//  and the value stored was larger than a byte, replace this load
//  with the value stored truncated to a byte.  If no truncation is
//  needed, the replacement is done in LoadNode::Identity().
//
Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value && !phase->type(value)->higher_equal(_type))
    return new AndINode(value, phase->intcon(0xFF));
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadUBNode::Value(PhaseGVN* phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make(con & 0xFF);
  }
  return LoadNode::Value(phase);
} |

//--------------------------LoadUSNode::Ideal-------------------------------------
//
//  If the previous store is to the same address as this load,
//  and the value stored was larger than a char, replace this load
//  with the value stored truncated to a char.  If no truncation is
//  needed, the replacement is done in LoadNode::Identity().
//
Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if( value && !phase->type(value)->higher_equal( _type ) )
    return new AndINode(value,phase->intcon(0xFFFF));
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadUSNode::Value(PhaseGVN* phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make(con & 0xFFFF);
  }
  return LoadNode::Value(phase);
} |
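
// Illustrative example (not from the original source): for a char load that
// sees a stored constant 0x12345, the mask above yields 0x12345 & 0xFFFF =
// 0x2345; unsigned loads mask instead of shifting because char and unsigned
// byte must zero-extend.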

//--------------------------LoadSNode::Ideal--------------------------------------
//
//  If the previous store is to the same address as this load,
//  and the value stored was larger than a short, replace this load
//  with the value stored truncated to a short.  If no truncation is
//  needed, the replacement is done in LoadNode::Identity().
//
Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if( value && !phase->type(value)->higher_equal( _type ) ) {
    Node *result = phase->transform( new LShiftINode(value, phase->intcon(16)) );
    return new RShiftINode(result, phase->intcon(16));
  }
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadSNode::Value(PhaseGVN* phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make((con << 16) >> 16);
  }
  return LoadNode::Value(phase);
} |

//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method:
Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
  // sanity check the alias category against the created node type
  const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
  assert(adr_type != NULL, "expecting TypeKlassPtr");
#ifdef _LP64
  if (adr_type->is_ptr_to_narrowklass()) {
    assert(UseCompressedClassPointers, "no compressed klasses");
    Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
    return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
  }
#endif
  assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
  return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
} |
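
// Illustrative note (not from the original source): with
// -XX:+UseCompressedClassPointers the factory above emits
//   DecodeNKlass(LoadNKlass(...))
// rather than a plain LoadKlass, so the 32-bit narrow klass load is decoded
// back to a full Klass* for consumers.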

//------------------------------Value------------------------------------------
const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
  return klass_value_common(phase);
}

// In most cases, LoadKlassNode does not have the control input set. If the control
// input is set, it must not be removed (by LoadNode::Ideal()).
bool LoadKlassNode::can_remove_control() const {
  return false;
}

const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { |
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if (t1 == Type::TOP)  return Type::TOP;
  Node *adr = in(MemNode::Address);
  const Type *t2 = phase->type( adr );
  if (t2 == Type::TOP)  return Type::TOP;
  const TypePtr *tp = t2->is_ptr();
  if (TypePtr::above_centerline(tp->ptr()) ||
      tp->ptr() == TypePtr::Null)  return Type::TOP;

  // Return a more precise klass, if possible
  const TypeInstPtr *tinst = tp->isa_instptr();
  if (tinst != NULL) {
    ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
    int offset = tinst->offset();
    if (ik == phase->C->env()->Class_klass()
        && (offset == java_lang_Class::klass_offset_in_bytes() ||
            offset == java_lang_Class::array_klass_offset_in_bytes())) {
      // We are loading a special hidden field from a Class mirror object,
      // the field which points to the VM's Klass metaobject.
      ciType* t = tinst->java_mirror_type();
      // java_mirror_type returns non-null for compile-time Class constants.
      if (t != NULL) {
        // constant oop => constant klass
        if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
          if (t->is_void()) {
            // We cannot create a void array.  Since void is a primitive type return null
            // klass.  Users of this result need to do a null check on the returned klass.
            return TypePtr::NULL_PTR;
          }
          return TypeKlassPtr::make(ciArrayKlass::make(t));
        }
        if (!t->is_klass()) {
          // a primitive Class (e.g., int.class) has NULL for a klass field
          return TypePtr::NULL_PTR;
        }
        // (Folds up the 1st indirection in aClassConstant.getModifiers().)
        return TypeKlassPtr::make(t->as_klass());
      }
      // non-constant mirror, so we can't tell what's going on
    }
    if( !ik->is_loaded() )
      return _type;             // Bail out if not loaded
    if (offset == oopDesc::klass_offset_in_bytes()) {
      if (tinst->klass_is_exact()) {
        return TypeKlassPtr::make(ik);
      }
      // See if we can become precise: no subklasses and no interface
      // (Note:  We need to support verified interfaces.)
      if (!ik->is_interface() && !ik->has_subklass()) {
        //assert(!UseExactTypes, "this code should be useless with exact types");
        // Add a dependence; if any subclass added we need to recompile
        if (!ik->is_final()) {
          // %%% should use stronger assert_unique_concrete_subtype instead
          phase->C->dependencies()->assert_leaf_type(ik);
        }
        // Return precise klass
        return TypeKlassPtr::make(ik);
      }

      // Return root of possible klass
      return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/);
    }
  }

  // Check for loading klass from an array
  const TypeAryPtr *tary = tp->isa_aryptr();
  if( tary != NULL ) {
    ciKlass *tary_klass = tary->klass();
    if (tary_klass != NULL   // can be NULL when at BOTTOM or TOP
        && tary->offset() == oopDesc::klass_offset_in_bytes()) {
      if (tary->klass_is_exact()) {
        return TypeKlassPtr::make(tary_klass);
      }
      ciArrayKlass *ak = tary->klass()->as_array_klass();
      // If the klass is an object array, we defer the question to the
      // array component klass.
      if( ak->is_obj_array_klass() ) {
        assert( ak->is_loaded(), "" );
        ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
        if( base_k->is_loaded() && base_k->is_instance_klass() ) {
          ciInstanceKlass* ik = base_k->as_instance_klass();
          // See if we can become precise: no subklasses and no interface
          if (!ik->is_interface() && !ik->has_subklass()) {
            //assert(!UseExactTypes, "this code should be useless with exact types");
            // Add a dependence; if any subclass added we need to recompile
            if (!ik->is_final()) {
              phase->C->dependencies()->assert_leaf_type(ik);
            }
            // Return precise array klass
            return TypeKlassPtr::make(ak);
          }
        }
        return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
      } else {                  // Found a type-array?
        //assert(!UseExactTypes, "this code should be useless with exact types");
        assert( ak->is_type_array_klass(), "" );
        return TypeKlassPtr::make(ak); // These are always precise
      }
    }
  }

  // Check for loading klass from an array klass
  const TypeKlassPtr *tkls = tp->isa_klassptr();
  if (tkls != NULL && !StressReflectiveCode) {
    ciKlass* klass = tkls->klass();
    if( !klass->is_loaded() )
      return _type;             // Bail out if not loaded
    if( klass->is_obj_array_klass() &&
        tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
      ciKlass* elem = klass->as_obj_array_klass()->element_klass();
      // // Always returning precise element type is incorrect,
      // // e.g., element type could be object and array may contain strings
      // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);

      // The array's TypeKlassPtr was declared 'precise' or 'not precise'
      // according to the element type's subclassing.
      return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
    }
    if( klass->is_instance_klass() && tkls->klass_is_exact() &&
        tkls->offset() == in_bytes(Klass::super_offset())) {
      ciKlass* sup = klass->as_instance_klass()->super();
      // The field is Klass::_super.  Return its (constant) value.
      // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
      return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
    }
  }

  // Bailout case
  return LoadNode::Value(phase);
} |
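
// Illustrative examples (not from the original source) of the mirror cases
// above: a constant String.class folds to the TypeKlassPtr of String's klass,
// while a primitive mirror such as int.class folds to TypePtr::NULL_PTR,
// because primitive Class objects carry no Klass.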

//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
// Also feed through the klass in Allocate(...klass...)._klass.
Node* LoadKlassNode::Identity(PhaseGVN* phase) {
  return klass_identity_common(phase);
}

Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
  Node* x = LoadNode::Identity(phase);
  if (x != this)  return x;

  // Take apart the address into an oop and an offset.
  // Return 'this' if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL)  return this;
  const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
  if (toop == NULL)  return this;

  // Step over potential GC barrier for OopHandle resolve
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  if (bs->is_gc_barrier_node(base)) {
    base = bs->step_over_gc_barrier(base);
  }

  // We can fetch the klass directly through an AllocateNode.
  // This works even if the klass is not constant (clone or newArray).
  if (offset == oopDesc::klass_offset_in_bytes()) {
    Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
    if (allocated_klass != NULL) {
      return allocated_klass;
    }
  }

  // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
  // See inline_native_Class_query for occurrences of these patterns.
  // Java Example:  x.getClass().isAssignableFrom(y)
  //
  // This improves reflective code, often making the Class
  // mirror go completely dead.  (Current exception:  Class
  // mirrors may appear in debug info, but we could clean them out by
  // introducing a new debug info operator for Klass.java_mirror).

  if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
      && offset == java_lang_Class::klass_offset_in_bytes()) {
    if (base->is_Load()) {
      Node* base2 = base->in(MemNode::Address);
      if (base2->is_Load()) { /* direct load of a load which is the OopHandle */
        Node* adr2 = base2->in(MemNode::Address);
        const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
        if (tkls != NULL && !tkls->empty()
            && (tkls->klass()->is_instance_klass() ||
                tkls->klass()->is_array_klass())
            && adr2->is_AddP()
            ) {
          int mirror_field = in_bytes(Klass::java_mirror_offset());
          if (tkls->offset() == mirror_field) {
            return adr2->in(AddPNode::Base);
          }
        }
      }
    }
  }

  return this;
} |

//------------------------------Value------------------------------------------
const Type* LoadNKlassNode::Value(PhaseGVN* phase) const {
  const Type *t = klass_value_common(phase);
  if (t == Type::TOP)
    return t;

  return t->make_narrowklass();
}

//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
// Also feed through the klass in Allocate(...klass...)._klass.
Node* LoadNKlassNode::Identity(PhaseGVN* phase) {
  Node *x = klass_identity_common(phase);

  const Type *t = phase->type( x );
  if( t == Type::TOP ) return x;
  if( t->isa_narrowklass()) return x;
  assert (!t->isa_narrowoop(), "no narrow oop here");

  return phase->transform(new EncodePKlassNode(x, t->make_narrowklass()));
} |

//------------------------------Value-----------------------------------------
const Type* LoadRangeNode::Value(PhaseGVN* phase) const {
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if( t1 == Type::TOP ) return Type::TOP;
  Node *adr = in(MemNode::Address);
  const Type *t2 = phase->type( adr );
  if( t2 == Type::TOP ) return Type::TOP;
  const TypePtr *tp = t2->is_ptr();
  if (TypePtr::above_centerline(tp->ptr())) return Type::TOP;
  const TypeAryPtr *tap = tp->isa_aryptr();
  if( !tap ) return _type;
  return tap->size();
} |

//-------------------------------Ideal---------------------------------------
// Feed through the length in AllocateArray(...length...)._length.
Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p)  return (p == NodeSentinel) ? NULL : p;

  // Take apart the address into an oop and an offset.
  // Return 'this' if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL)  return NULL;
  const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
  if (tary == NULL)  return NULL;

  // We can fetch the length directly through an AllocateArrayNode.
  // This works even if the length is not constant (clone or newArray).
  if (offset == arrayOopDesc::length_offset_in_bytes()) {
    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
    if (alloc != NULL) {
      Node* allocated_length = alloc->Ideal_length();
      Node* len = alloc->make_ideal_length(tary, phase);
      if (allocated_length != len) {
        // New CastII improves on this.
        return len;
      }
    }
  }

  return NULL;
} |
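
// Illustrative example (not from the original source): for
//   int[] a = new int[n]; int m = a.length;
// the LoadRange of a.length feeds through to the AllocateArray's length
// input, so m becomes the original n without reloading from the header.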

//------------------------------Identity---------------------------------------
// Feed through the length in AllocateArray(...length...)._length.
Node* LoadRangeNode::Identity(PhaseGVN* phase) {
  Node* x = LoadINode::Identity(phase);
  if (x != this)  return x;

  // Take apart the address into an oop and an offset.
  // Return 'this' if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL)  return this;
  const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
  if (tary == NULL)  return this;

  // We can fetch the length directly through an AllocateArrayNode.
  // This works even if the length is not constant (clone or newArray).
  if (offset == arrayOopDesc::length_offset_in_bytes()) {
    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
    if (alloc != NULL) {
      Node* allocated_length = alloc->Ideal_length();
      // Do not allow make_ideal_length to allocate a CastII node.
      Node* len = alloc->make_ideal_length(tary, phase, false);
      if (allocated_length == len) {
        // Return allocated_length only if it would not be improved by a CastII.
        return allocated_length;
      }
    }
  }

  return this;
} |
|
1398
342890a5d031
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
1067
diff
changeset
|
2370 |
|
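
// Example (illustrative, not from the original source): for Java code such as
//   int[] a = new int[n]; use(a.length);
// the LoadRange feeding a.length can be replaced by the allocation's length
// input n, so no load from the array header is needed at all.
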
//=============================================================================
//---------------------------StoreNode::make-----------------------------------
// Polymorphic factory method:
StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
  assert((mo == unordered || mo == release), "unexpected");
  Compile* C = gvn.C;
  assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
         ctl != NULL, "raw memory operations should have control edge");

  switch (bt) {
  case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
  case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
  case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
  case T_CHAR:
  case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
  case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
  case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
  case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
  case T_METADATA:
  case T_ADDRESS:
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
      return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
    } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                adr->bottom_type()->isa_rawptr())) {
      val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
      return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
    }
#endif
    {
      return new StorePNode(ctl, mem, adr, adr_type, val, mo);
    }
  default:
    ShouldNotReachHere();
    return (StoreNode*)NULL;
  }
}
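
// Usage sketch (illustrative): callers build stores through this factory and
// transform the result, rather than instantiating a concrete node directly:
//   Node* st = gvn.transform(StoreNode::make(gvn, ctl, mem, adr, adr_type,
//                                            val, T_INT, MemNode::unordered));
// On 64-bit VMs an oop store to a compressed-oop address is silently rewritten
// by the T_OBJECT case above into (StoreN ... (EncodeP val)).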

StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
  bool require_atomic = true;
  return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
}

StoreDNode* StoreDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
  bool require_atomic = true;
  return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
}


//--------------------------bottom_type----------------------------------------
const Type *StoreNode::bottom_type() const {
  return Type::MEMORY;
}

//------------------------------hash-------------------------------------------
uint StoreNode::hash() const {
  // unroll addition of interesting fields
  //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);

  // Since they are not commoned, do not hash them:
  return NO_HASH;
}

//------------------------------Ideal------------------------------------------
// Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
// When a store immediately follows a relevant allocation/initialization,
// try to capture it into the initialization, or hoist it above.
Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p)  return (p == NodeSentinel) ? NULL : p;

  Node* mem     = in(MemNode::Memory);
  Node* address = in(MemNode::Address);
  // Back-to-back stores to same address?  Fold em up.  Generally
  // unsafe if I have intervening uses...  Also disallowed for StoreCM
  // since they must follow each StoreP operation.  Redundant StoreCMs
  // are eliminated just before matching in final_graph_reshape.
  {
    Node* st = mem;
    // If Store 'st' has more than one use, we cannot fold 'st' away.
    // For example, 'st' might be the final state at a conditional
    // return.  Or, 'st' might be used by some node which is live at
    // the same time 'st' is live, which might be unschedulable.  So,
    // require exactly ONE user until such time as we clone 'mem' for
    // each of 'mem's uses (thus making the exactly-1-user-rule hold
    // true).
    while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
      // Looking at a dead closed cycle of memory?
      assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
      assert(Opcode() == st->Opcode() ||
             st->Opcode() == Op_StoreVector ||
             Opcode() == Op_StoreVector ||
             phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
             (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
             (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
             (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
             "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);

      if (st->in(MemNode::Address)->eqv_uncast(address) &&
          st->as_Store()->memory_size() <= this->memory_size()) {
        Node* use = st->raw_out(0);
        phase->igvn_rehash_node_delayed(use);
        if (can_reshape) {
          use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN());
        } else {
          // It's OK to do this in the parser, since DU info is always accurate,
          // and the parser always refers to nodes via SafePointNode maps.
          use->set_req(MemNode::Memory, st->in(MemNode::Memory));
        }
        return this;
      }
      st = st->in(MemNode::Memory);
    }
  }
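
  // Example (illustrative): for Java code like "a[i] = 1; a[i] = 2;" the first
  // StoreI feeds the second with no other use of its memory state, so the loop
  // above splices the dead store out of the memory chain and only the store of
  // 2 remains.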

  // Capture an unaliased, unconditional, simple store into an initializer.
  // Or, if it is independent of the allocation, hoist it above the allocation.
  if (ReduceFieldZeroing && /*can_reshape &&*/
      mem->is_Proj() && mem->in(0)->is_Initialize()) {
    InitializeNode* init = mem->in(0)->as_Initialize();
    intptr_t offset = init->can_capture_store(this, phase, can_reshape);
    if (offset > 0) {
      Node* moved = init->capture_store(this, offset, phase, can_reshape);
      // If the InitializeNode captured me, it made a raw copy of me,
      // and I need to disappear.
      if (moved != NULL) {
        // %%% hack to ensure that Ideal returns a new node:
        mem = MergeMemNode::make(mem);
        return mem;             // fold me away
      }
    }
  }

  return NULL;                  // No further progress
}

//------------------------------Value-----------------------------------------
const Type* StoreNode::Value(PhaseGVN* phase) const {
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if( t1 == Type::TOP ) return Type::TOP;
  const Type *t2 = phase->type( in(MemNode::Address) );
  if( t2 == Type::TOP ) return Type::TOP;
  const Type *t3 = phase->type( in(MemNode::ValueIn) );
  if( t3 == Type::TOP ) return Type::TOP;
  return Type::MEMORY;
}

//------------------------------Identity---------------------------------------
// Remove redundant stores:
//   Store(m, p, Load(m, p)) changes to m.
//   Store(, p, x) -> Store(m, p, x) changes to Store(m, p, x).
Node* StoreNode::Identity(PhaseGVN* phase) {
  Node* mem = in(MemNode::Memory);
  Node* adr = in(MemNode::Address);
  Node* val = in(MemNode::ValueIn);

  Node* result = this;

  // Load then Store?  Then the Store is useless
  if (val->is_Load() &&
      val->in(MemNode::Address)->eqv_uncast(adr) &&
      val->in(MemNode::Memory )->eqv_uncast(mem) &&
      val->as_Load()->store_Opcode() == Opcode()) {
    result = mem;
  }

  // Two stores in a row of the same value?
  if (result == this &&
      mem->is_Store() &&
      mem->in(MemNode::Address)->eqv_uncast(adr) &&
      mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
      mem->Opcode() == Opcode()) {
    result = mem;
  }

  // Store of zero anywhere into a freshly-allocated object?
  // Then the store is useless.
  // (It must already have been captured by the InitializeNode.)
  if (result == this &&
      ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
    // a newly allocated object is already all-zeroes everywhere
    if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
      result = mem;
    }

    if (result == this) {
      // the store may also apply to zero-bits in an earlier object
      Node* prev_mem = find_previous_store(phase);
      // Steps (a), (b):  Walk past independent stores to find an exact match.
      if (prev_mem != NULL) {
        Node* prev_val = can_see_stored_value(prev_mem, phase);
        if (prev_val != NULL && phase->eqv(prev_val, val)) {
          // prev_val and val might differ by a cast; it would be good
          // to keep the more informative of the two.
          result = mem;
        }
      }
    }
  }

  if (result != this && phase->is_IterGVN() != NULL) {
    MemBarNode* trailing = trailing_membar();
    if (trailing != NULL) {
#ifdef ASSERT
      const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
      assert(t_oop == NULL || t_oop->is_known_instance_field(), "only for non escaping objects");
#endif
      PhaseIterGVN* igvn = phase->is_IterGVN();
      trailing->remove(igvn);
    }
  }

  return result;
}
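
// Example (illustrative): "x.f = x.f;" loads x.f and stores the same value
// straight back through the same memory state, so the StoreI is replaced by
// its memory input and disappears; likewise "x.f = v; x.f = v;" keeps only
// one of the two identical stores.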

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not?  Match only memory & value
uint StoreNode::match_edge(uint idx) const {
  return idx == MemNode::Address || idx == MemNode::ValueIn;
}

//------------------------------cmp--------------------------------------------
// Do not common stores up together.  They generally have to be split
// back up anyways, so do not bother.
uint StoreNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------Ideal_masked_input-----------------------------
// Check for a useless mask before a partial-word store
// (StoreB ... (AndI valIn conIa) )
// If (conIa & mask == mask) this simplifies to
// (StoreB ... (valIn) )
Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) {
  Node *val = in(MemNode::ValueIn);
  if( val->Opcode() == Op_AndI ) {
    const TypeInt *t = phase->type( val->in(2) )->isa_int();
    if( t && t->is_con() && (t->get_con() & mask) == mask ) {
      set_req(MemNode::ValueIn, val->in(1));
      return this;
    }
  }
  return NULL;
}
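
// Example (illustrative): Java code like "b[i] = (byte)(x & 0xFF);" reaches
// here as (StoreB ... (AndI x (ConI 0xFF))).  Since the byte store keeps only
// the low 8 bits anyway, the AndI is redundant and the store is rewritten to
// (StoreB ... x).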

//------------------------------Ideal_sign_extended_input----------------------
// Check for useless sign-extension before a partial-word store
// (StoreB ... (RShiftI _ (LShiftI _ valIn conIL ) conIR) )
// If (conIL == conIR && conIR <= num_bits) this simplifies to
// (StoreB ... (valIn) )
Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) {
  Node *val = in(MemNode::ValueIn);
  if( val->Opcode() == Op_RShiftI ) {
    const TypeInt *t = phase->type( val->in(2) )->isa_int();
    if( t && t->is_con() && (t->get_con() <= num_bits) ) {
      Node *shl = val->in(1);
      if( shl->Opcode() == Op_LShiftI ) {
        const TypeInt *t2 = phase->type( shl->in(2) )->isa_int();
        if( t2 && t2->is_con() && (t2->get_con() == t->get_con()) ) {
          set_req(MemNode::ValueIn, shl->in(1));
          return this;
        }
      }
    }
  }
  return NULL;
}
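
// Example (illustrative): "b[i] = (byte) x;" with x an int may arrive as
// (StoreB ... (RShiftI (LShiftI x 24) 24)).  The shift pair only normalizes
// bits that the byte store discards, so the value input is rewired directly
// to x and both shifts become dead.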

//------------------------------value_never_loaded-----------------------------------
// Determine whether there are any possible loads of the value stored.
// For simplicity, we actually check if there are any loads from the
// address stored to, not just for loads of the value stored by this node.
//
bool StoreNode::value_never_loaded( PhaseTransform *phase) const {
  Node *adr = in(Address);
  const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr();
  if (adr_oop == NULL)
    return false;
  if (!adr_oop->is_known_instance_field())
    return false; // if not a distinct instance, there may be aliases of the address
  for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
    Node *use = adr->fast_out(i);
    if (use->is_Load() || use->is_LoadStore()) {
      return false;
    }
  }
  return true;
}

MemBarNode* StoreNode::trailing_membar() const {
  if (is_release()) {
    MemBarNode* trailing_mb = NULL;
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      if (u->is_MemBar()) {
        if (u->as_MemBar()->trailing_store()) {
          assert(u->Opcode() == Op_MemBarVolatile, "");
          assert(trailing_mb == NULL, "only one");
          trailing_mb = u->as_MemBar();
#ifdef ASSERT
          Node* leading = u->as_MemBar()->leading_membar();
          assert(leading->Opcode() == Op_MemBarRelease, "incorrect membar");
          assert(leading->as_MemBar()->leading_store(), "incorrect membar pair");
          assert(leading->as_MemBar()->trailing_membar() == u, "incorrect membar pair");
#endif
        } else {
          assert(u->as_MemBar()->standalone(), "");
        }
      }
    }
    return trailing_mb;
  }
  return NULL;
}

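// Example (illustrative): a volatile field store is emitted as the sequence
//   MemBarRelease -> StoreX -> MemBarVolatile
// with the two barriers recorded as a leading/trailing pair (see the asserts
// above).  When the store itself is optimized away, Identity uses
// trailing_membar() to locate and remove the now-useless trailing barrier.
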
//=============================================================================
//------------------------------Ideal------------------------------------------
// If the store is from an AND mask that leaves the low bits untouched, then
// we can skip the AND operation.  If the store is from a sign-extension
// (a left shift, then right shift) we can skip both.
Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){
  Node *progress = StoreNode::Ideal_masked_input(phase, 0xFF);
  if( progress != NULL ) return progress;

  progress = StoreNode::Ideal_sign_extended_input(phase, 24);
  if( progress != NULL ) return progress;

  // Finally check the default case
  return StoreNode::Ideal(phase, can_reshape);
}

//=============================================================================
//------------------------------Ideal------------------------------------------
// If the store is from an AND mask that leaves the low bits untouched, then
// we can skip the AND operation
Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){
  Node *progress = StoreNode::Ideal_masked_input(phase, 0xFFFF);
  if( progress != NULL ) return progress;

  progress = StoreNode::Ideal_sign_extended_input(phase, 16);
  if( progress != NULL ) return progress;

  // Finally check the default case
  return StoreNode::Ideal(phase, can_reshape);
}

//=============================================================================
//------------------------------Identity---------------------------------------
Node* StoreCMNode::Identity(PhaseGVN* phase) {
  // No need to card mark when storing a null ptr
  Node* my_store = in(MemNode::OopStore);
  if (my_store->is_Store()) {
    const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
    if( t1 == TypePtr::NULL_PTR ) {
      return in(MemNode::Memory);
    }
  }
  return this;
}
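
// Rationale (illustrative): storing null into an oop field cannot create a
// cross-generation pointer, so the card for the field's object never needs to
// be dirtied, and the StoreCM collapses to its memory input.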

//=============================================================================
//------------------------------Ideal---------------------------------------
Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
  Node* progress = StoreNode::Ideal(phase, can_reshape);
  if (progress != NULL) return progress;

  Node* my_store = in(MemNode::OopStore);
  if (my_store->is_MergeMem()) {
    Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
    set_req(MemNode::OopStore, mem);
    return this;
  }

  return NULL;
}

//------------------------------Value-----------------------------------------
const Type* StoreCMNode::Value(PhaseGVN* phase) const {
  // Either input is TOP ==> the result is TOP
  const Type *t = phase->type( in(MemNode::Memory) );
  if( t == Type::TOP ) return Type::TOP;
  t = phase->type( in(MemNode::Address) );
  if( t == Type::TOP ) return Type::TOP;
  t = phase->type( in(MemNode::ValueIn) );
  if( t == Type::TOP ) return Type::TOP;
  // If extra input is TOP ==> the result is TOP
  t = phase->type( in(MemNode::OopStore) );
  if( t == Type::TOP ) return Type::TOP;

  return StoreNode::Value( phase );
}

//=============================================================================
//----------------------------------SCMemProjNode------------------------------
const Type* SCMemProjNode::Value(PhaseGVN* phase) const
{
  return bottom_type();
}

//=============================================================================
//----------------------------------LoadStoreNode------------------------------
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
  : Node(required),
    _type(rt),
    _adr_type(at)
{
  init_req(MemNode::Control, c  );
  init_req(MemNode::Memory , mem);
  init_req(MemNode::Address, adr);
  init_req(MemNode::ValueIn, val);
  init_class_id(Class_LoadStore);
}

uint LoadStoreNode::ideal_reg() const {
  return _type->ideal_reg();
}

bool LoadStoreNode::result_not_used() const {
  for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
    Node *x = fast_out(i);
    if (x->Opcode() == Op_SCMemProj) continue;
    return false;
  }
  return true;
}
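
// Example (illustrative): for an atomic update whose return value is ignored,
// such as "UNSAFE.getAndAddInt(o, off, 1)" used purely as a counter bump, the
// only output of the LoadStore node is its SCMemProj memory projection.
// result_not_used() then returns true, which lets a matcher pick a cheaper
// instruction form (e.g. lock add instead of lock xadd on x86).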
8d82c4dfa722
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
13728
diff
changeset
|
2804 |
|
51482
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2805 |
MemBarNode* LoadStoreNode::trailing_membar() const { |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2806 |
MemBarNode* trailing = NULL; |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2807 |
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2808 |
Node* u = fast_out(i); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2809 |
if (u->is_MemBar()) { |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2810 |
if (u->as_MemBar()->trailing_load_store()) { |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2811 |
assert(u->Opcode() == Op_MemBarAcquire, ""); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2812 |
assert(trailing == NULL, "only one"); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2813 |
trailing = u->as_MemBar(); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2814 |
#ifdef ASSERT |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2815 |
Node* leading = trailing->leading_membar(); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2816 |
assert(support_IRIW_for_not_multiple_copy_atomic_cpu || leading->Opcode() == Op_MemBarRelease, "incorrect membar"); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2817 |
assert(leading->as_MemBar()->leading_load_store(), "incorrect membar pair"); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2818 |
assert(leading->as_MemBar()->trailing_membar() == trailing, "incorrect membar pair"); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2819 |
#endif |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2820 |
} else { |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2821 |
assert(u->as_MemBar()->standalone(), "wrong barrier kind"); |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2822 |
} |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2823 |
} |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2824 |
} |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2825 |
|
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2826 |
return trailing; |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2827 |
} |
d7029542d67a
8209420: Track membars for volatile accesses so they can be properly optimized
roland
parents:
51333
diff
changeset
|
2828 |
|
uint LoadStoreNode::size_of() const { return sizeof(*this); }

//=============================================================================
//----------------------------------LoadStoreConditionalNode--------------------
LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
  init_req(ExpectedIn, ex  );
}

//=============================================================================
//-------------------------------adr_type--------------------------------------
const TypePtr* ClearArrayNode::adr_type() const {
  Node *adr = in(3);
  if (adr == NULL)  return NULL; // node is dead
  return MemNode::calculate_adr_type(adr->bottom_type());
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not?  Do not match memory
uint ClearArrayNode::match_edge(uint idx) const {
  return idx > 1;
}

//------------------------------Identity---------------------------------------
// Clearing a zero length array does nothing
Node* ClearArrayNode::Identity(PhaseGVN* phase) {
  return phase->type(in(2))->higher_equal(TypeX::ZERO) ? in(1) : this;
}
2856 |
||
2857 |
//------------------------------Idealize--------------------------------------- |
|
2858 |
// Clearing a short array is faster with stores |
|
42063
dca9294d9f59
8166561: [s390] Adaptions needed for s390 port in C1 and C2.
goetz
parents:
40871
diff
changeset
|
2859 |
Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
36554
a7eb9ee4680c
8146801: Allocating short arrays of non-constant size is slow
shade
parents:
36342
diff
changeset
|
2860 |
// Already know this is a large node, do not try to ideal it |
42063
dca9294d9f59
8166561: [s390] Adaptions needed for s390 port in C1 and C2.
goetz
parents:
40871
diff
changeset
|
2861 |
if (!IdealizeClearArrayNode || _is_large) return NULL; |
36554
a7eb9ee4680c
8146801: Allocating short arrays of non-constant size is slow
shade
parents:
36342
diff
changeset
|
2862 |
|
1 | 2863 |
const int unit = BytesPerLong; |
2864 |
const TypeX* t = phase->type(in(2))->isa_intptr_t(); |
|
2865 |
if (!t) return NULL; |
|
2866 |
if (!t->is_con()) return NULL; |
|
2867 |
intptr_t raw_count = t->get_con(); |
|
2868 |
intptr_t size = raw_count; |
|
2869 |
if (!Matcher::init_array_count_is_in_bytes) size *= unit; |
|
2870 |
// Clearing nothing uses the Identity call. |
|
2871 |
// Negative clears are possible on dead ClearArrays |
|
2872 |
// (see jck test stmt114.stmt11402.val). |
|
2873 |
if (size <= 0 || size % unit != 0) return NULL; |
|
2874 |
intptr_t count = size / unit; |
|
36554
a7eb9ee4680c
8146801: Allocating short arrays of non-constant size is slow
shade
parents:
36342
diff
changeset
|
2875 |
// Length too long; communicate this to matchers and assemblers. |
a7eb9ee4680c
8146801: Allocating short arrays of non-constant size is slow
shade
parents:
36342
diff
changeset
|
2876 |
// Assemblers are responsible to produce fast hardware clears for it. |
a7eb9ee4680c
8146801: Allocating short arrays of non-constant size is slow
shade
parents:
36342
diff
changeset
|
2877 |
if (size > InitArrayShortSize) { |
a7eb9ee4680c
8146801: Allocating short arrays of non-constant size is slow
shade
parents:
36342
diff
changeset
|
2878 |
return new ClearArrayNode(in(0), in(1), in(2), in(3), true); |
a7eb9ee4680c
8146801: Allocating short arrays of non-constant size is slow
shade
parents:
36342
diff
changeset
|
2879 |
} |
1 | 2880 |
Node *mem = in(1); |
2881 |
if( phase->type(mem)==Type::TOP ) return NULL; |
|
2882 |
Node *adr = in(3); |
|
2883 |
const Type* at = phase->type(adr); |
|
2884 |
if( at==Type::TOP ) return NULL; |
|
2885 |
const TypePtr* atp = at->isa_ptr(); |
|
2886 |
// adjust atp to be the correct array element address type |
|
2887 |
if (atp == NULL) atp = TypePtr::BOTTOM; |
|
2888 |
else atp = atp->add_offset(Type::OffsetBot); |
|
2889 |
// Get base for derived pointer purposes |
|
2890 |
if( adr->Opcode() != Op_AddP ) Unimplemented(); |
|
2891 |
Node *base = adr->in(1); |
|
2892 |
||
2893 |
Node *zero = phase->makecon(TypeLong::ZERO); |
|
2894 |
Node *off = phase->MakeConX(BytesPerLong); |
|
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
2895 |
mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); |
1 | 2896 |
count--; |
2897 |
while( count-- ) { |
|
2898 |
mem = phase->transform(mem); |
|
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
2899 |
adr = phase->transform(new AddPNode(base,adr,off)); |
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
2900 |
mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); |
1 | 2901 |
} |
2902 |
return mem; |
|
2903 |
} |
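
// Example (illustrative): clearing a 16-byte body (count == 2 long words)
// unrolls into a short chain of zeroing stores instead of a ClearArray loop:
//   mem = StoreL(ctl, mem, adr, 0L)
//   adr = AddP(base, adr, #8)
//   mem = StoreL(ctl, mem, adr, 0L)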

//----------------------------step_through----------------------------------
// Return allocation input memory edge if it is different instance
// or itself if it is the one we are looking for.
bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
  Node* n = *np;
  assert(n->is_ClearArray(), "sanity");
  intptr_t offset;
  AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
  // This method is called only before Allocate nodes are expanded
  // during macro node expansion. Before that, ClearArray nodes are
  // only generated in PhaseMacroExpand::generate_arraycopy() (before
  // Allocate nodes are expanded) which follows allocations.
  assert(alloc != NULL, "should have allocation");
  if (alloc->_idx == instance_id) {
    // Cannot bypass initialization of the instance we are looking for.
    return false;
  }
  // Otherwise skip it.
  InitializeNode* init = alloc->initialization();
  if (init != NULL)
    *np = init->in(TypeFunc::Memory);
  else
    *np = alloc->in(TypeFunc::Memory);
  return true;
}
1e6edcab3109
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
4450
diff
changeset
|
2930 |
|
1 | 2931 |
//----------------------------clear_memory------------------------------------- |
2932 |
// Generate code to initialize object storage to zero. |
|
2933 |
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest, |
|
2934 |
intptr_t start_offset, |
|
2935 |
Node* end_offset, |
|
2936 |
PhaseGVN* phase) { |
|
2937 |
intptr_t offset = start_offset; |
|
2938 |
||
2939 |
int unit = BytesPerLong; |
|
2940 |
if ((offset % unit) != 0) { |
|
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
2941 |
Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset)); |
1 | 2942 |
adr = phase->transform(adr); |
2943 |
const TypePtr* atp = TypeRawPtr::BOTTOM; |
|
22845
d8812d0ff387
8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering
goetz
parents:
19995
diff
changeset
|
2944 |
mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); |
1 | 2945 |
mem = phase->transform(mem); |
2946 |
offset += BytesPerInt; |
|
2947 |
} |
|
2948 |
assert((offset % unit) == 0, ""); |
|
2949 |
||
2950 |
// Initialize the remaining stuff, if any, with a ClearArray. |
|
2951 |
return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase); |
|
2952 |
} |
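
// Worked example (illustrative, not part of the original source): on a
// 32-bit VM with a 12-byte array header, a call with start_offset == 12
// first emits a single 32-bit zero store at dest+12 (12 % BytesPerLong != 0),
// advances the offset to 16, and then hands the 8-byte-aligned remainder
// to the Node-offset overload below.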

Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   Node* start_offset,
                                   Node* end_offset,
                                   PhaseGVN* phase) {
  if (start_offset == end_offset) {
    // nothing to do
    return mem;
  }

  int unit = BytesPerLong;
  Node* zbase = start_offset;
  Node* zend  = end_offset;

  // Scale to the unit required by the CPU:
  if (!Matcher::init_array_count_is_in_bytes) {
    Node* shift = phase->intcon(exact_log2(unit));
    zbase = phase->transform(new URShiftXNode(zbase, shift) );
    zend  = phase->transform(new URShiftXNode(zend,  shift) );
  }

  // Bulk clear double-words
  Node* zsize = phase->transform(new SubXNode(zend, zbase) );
  Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
  mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
  return phase->transform(mem);
}

Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   intptr_t start_offset,
                                   intptr_t end_offset,
                                   PhaseGVN* phase) {
  if (start_offset == end_offset) {
    // nothing to do
    return mem;
  }

  assert((end_offset % BytesPerInt) == 0, "odd end offset");
  intptr_t done_offset = end_offset;
  if ((done_offset % BytesPerLong) != 0) {
    done_offset -= BytesPerInt;
  }
  if (done_offset > start_offset) {
    mem = clear_memory(ctl, mem, dest,
                       start_offset, phase->MakeConX(done_offset), phase);
  }
  if (done_offset < end_offset) { // emit the final 32-bit store
    Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
    mem = phase->transform(mem);
    done_offset += BytesPerInt;
  }
  assert(done_offset == end_offset, "");
  return mem;
}
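
// Worked example (illustrative, not part of the original source): for
// start_offset == 8 and end_offset == 20, done_offset rounds down to 16
// (20 % BytesPerLong != 0), the range [8,16) is bulk-cleared in
// double-words, and one final 32-bit zero store covers the word at 16.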

//=============================================================================
MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
  : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
    _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
#ifdef ASSERT
  , _pair_idx(0)
#endif
{
  init_class_id(Class_MemBar);
  Node* top = C->top();
  init_req(TypeFunc::I_O,top);
  init_req(TypeFunc::FramePtr,top);
  init_req(TypeFunc::ReturnAdr,top);
  if (precedent != NULL)
    init_req(TypeFunc::Parms, precedent);
}

//------------------------------cmp--------------------------------------------
uint MemBarNode::hash() const { return NO_HASH; }
uint MemBarNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------make-------------------------------------------
MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
  switch (opcode) {
  case Op_MemBarAcquire:     return new MemBarAcquireNode(C, atp, pn);
  case Op_LoadFence:         return new LoadFenceNode(C, atp, pn);
  case Op_MemBarRelease:     return new MemBarReleaseNode(C, atp, pn);
  case Op_StoreFence:        return new StoreFenceNode(C, atp, pn);
  case Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
  case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
  case Op_MemBarVolatile:    return new MemBarVolatileNode(C, atp, pn);
  case Op_MemBarCPUOrder:    return new MemBarCPUOrderNode(C, atp, pn);
  case Op_OnSpinWait:        return new OnSpinWaitNode(C, atp, pn);
  case Op_Initialize:        return new InitializeNode(C, atp, pn);
  case Op_MemBarStoreStore:  return new MemBarStoreStoreNode(C, atp, pn);
  default: ShouldNotReachHere(); return NULL;
  }
}
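
// Usage sketch (an assumption for exposition, not code from this file):
// callers such as GraphKit::insert_mem_bar() typically go through this
// factory, e.g.
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarVolatile, Compile::AliasIdxBot);
// and then transform the node and wire up its control and memory projections.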

void MemBarNode::remove(PhaseIterGVN *igvn) {
  if (outcnt() != 2) {
    return;
  }
  if (trailing_store() || trailing_load_store()) {
    MemBarNode* leading = leading_membar();
    if (leading != NULL) {
      assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars");
      leading->remove(igvn);
    }
  }
  igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
  igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) {
    return NULL;
  }

#if INCLUDE_ZGC
  if (UseZGC) {
    if (req() == (Precedent+1) && in(MemBarNode::Precedent)->in(0) != NULL && in(MemBarNode::Precedent)->in(0)->is_LoadBarrier()) {
      Node* load_node = in(MemBarNode::Precedent)->in(0)->in(LoadBarrierNode::Oop);
      set_req(MemBarNode::Precedent, load_node);
      return this;
    }
  }
#endif

  bool progress = false;
  // Eliminate volatile MemBars for scalar replaced objects.
  if (can_reshape && req() == (Precedent+1)) {
    bool eliminate = false;
    int opc = Opcode();
    if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
      // Volatile field loads and stores.
      Node* my_mem = in(MemBarNode::Precedent);
      // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
      if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
        // if the Precedent is a decodeN and its input (a Load) is used at more than one place,
        // replace this Precedent (decodeN) with the Load instead.
        if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
          Node* load_node = my_mem->in(1);
          set_req(MemBarNode::Precedent, load_node);
          phase->is_IterGVN()->_worklist.push(my_mem);
          my_mem = load_node;
        } else {
          assert(my_mem->unique_out() == this, "sanity");
          del_req(Precedent);
          phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
          my_mem = NULL;
        }
        progress = true;
      }
      if (my_mem != NULL && my_mem->is_Mem()) {
        const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
        // Check for scalar replaced object reference.
        if( t_oop != NULL && t_oop->is_known_instance_field() &&
            t_oop->offset() != Type::OffsetBot &&
            t_oop->offset() != Type::OffsetTop) {
          eliminate = true;
        }
      }
    } else if (opc == Op_MemBarRelease) {
      // Final field stores.
      Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
      if ((alloc != NULL) && alloc->is_Allocate() &&
          alloc->as_Allocate()->does_not_escape_thread()) {
        // The allocated object does not escape.
        eliminate = true;
      }
    }
    if (eliminate) {
      // Replace MemBar projections by its inputs.
      PhaseIterGVN* igvn = phase->is_IterGVN();
      remove(igvn);
      // Must return either the original node (now dead) or a new node
      // (Do not return a top here, since that would break the uniqueness of top.)
      return new ConINode(TypeInt::ZERO);
    }
  }
  return progress ? this : NULL;
}

//------------------------------Value------------------------------------------
const Type* MemBarNode::Value(PhaseGVN* phase) const {
  if( !in(0) ) return Type::TOP;
  if( phase->type(in(0)) == Type::TOP )
    return Type::TOP;
  return TypeTuple::MEMBAR;
}

//------------------------------match------------------------------------------
// Construct projections for memory.
Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  }
  ShouldNotReachHere();
  return NULL;
}

void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
  trailing->_kind = TrailingStore;
  leading->_kind = LeadingStore;
#ifdef ASSERT
  trailing->_pair_idx = leading->_idx;
  leading->_pair_idx = leading->_idx;
#endif
}

void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
  trailing->_kind = TrailingLoadStore;
  leading->_kind = LeadingLoadStore;
#ifdef ASSERT
  trailing->_pair_idx = leading->_idx;
  leading->_pair_idx = leading->_idx;
#endif
}
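
// Illustrative shape (a sketch for exposition, not code from this file):
// for a volatile store the paired barriers bracket the access roughly as
//   MemBarRelease (leading) --> StoreX --> MemBarVolatile (trailing)
// with _kind/_pair_idx tying the two together, so that removing one barrier
// can locate and remove its partner (see MemBarNode::remove() above).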

MemBarNode* MemBarNode::trailing_membar() const {
  ResourceMark rm;
  Node* trailing = (Node*)this;
  VectorSet seen(Thread::current()->resource_area());
  Node_Stack multis(0);
  do {
    Node* c = trailing;
    uint i = 0;
    do {
      trailing = NULL;
      for (; i < c->outcnt(); i++) {
        Node* next = c->raw_out(i);
        if (next != c && next->is_CFG()) {
          if (c->is_MultiBranch()) {
            if (multis.node() == c) {
              multis.set_index(i+1);
            } else {
              multis.push(c, i+1);
            }
          }
          trailing = next;
          break;
        }
      }
      if (trailing != NULL && !seen.test_set(trailing->_idx)) {
        break;
      }
      while (multis.size() > 0) {
        c = multis.node();
        i = multis.index();
        if (i < c->req()) {
          break;
        }
        multis.pop();
      }
    } while (multis.size() > 0);
  } while (!trailing->is_MemBar() || !trailing->as_MemBar()->trailing());

  MemBarNode* mb = trailing->as_MemBar();
  assert((mb->_kind == TrailingStore && _kind == LeadingStore) ||
         (mb->_kind == TrailingLoadStore && _kind == LeadingLoadStore), "bad trailing membar");
  assert(mb->_pair_idx == _pair_idx, "bad trailing membar");
  return mb;
}

MemBarNode* MemBarNode::leading_membar() const {
  ResourceMark rm;
  VectorSet seen(Thread::current()->resource_area());
  Node_Stack regions(0);
  Node* leading = in(0);
  while (leading != NULL && (!leading->is_MemBar() || !leading->as_MemBar()->leading())) {
    while (leading == NULL || leading->is_top() || seen.test_set(leading->_idx)) {
      leading = NULL;
      while (regions.size() > 0) {
        Node* r = regions.node();
        uint i = regions.index();
        if (i < r->req()) {
          leading = r->in(i);
          regions.set_index(i+1);
        } else {
          regions.pop();
        }
      }
      if (leading == NULL) {
        assert(regions.size() == 0, "all paths should have been tried");
        return NULL;
      }
    }
    if (leading->is_Region()) {
      regions.push(leading, 2);
      leading = leading->in(1);
    } else {
      leading = leading->in(0);
    }
  }
#ifdef ASSERT
  Unique_Node_List wq;
  wq.push((Node*)this);
  uint found = 0;
  for (uint i = 0; i < wq.size(); i++) {
    Node* n = wq.at(i);
    if (n->is_Region()) {
      for (uint j = 1; j < n->req(); j++) {
        Node* in = n->in(j);
        if (in != NULL && !in->is_top()) {
          wq.push(in);
        }
      }
    } else {
      if (n->is_MemBar() && n->as_MemBar()->leading()) {
        assert(n == leading, "consistency check failed");
        found++;
      } else {
        Node* in = n->in(0);
        if (in != NULL && !in->is_top()) {
          wq.push(in);
        }
      }
    }
  }
  assert(found == 1 || (found == 0 && leading == NULL), "consistency check failed");
#endif
  if (leading == NULL) {
    return NULL;
  }
  MemBarNode* mb = leading->as_MemBar();
  assert((mb->_kind == LeadingStore && _kind == TrailingStore) ||
         (mb->_kind == LeadingLoadStore && _kind == TrailingLoadStore), "bad leading membar");
  assert(mb->_pair_idx == _pair_idx, "bad leading membar");
  return mb;
}

//===========================InitializeNode====================================
// SUMMARY:
// This node acts as a memory barrier on raw memory, after some raw stores.
// The 'cooked' oop value feeds from the Initialize, not the Allocation.
// The Initialize can 'capture' suitably constrained stores as raw inits.
// It can coalesce related raw stores into larger units (called 'tiles').
// It can avoid zeroing new storage for memory units which have raw inits.
// At macro-expansion, it is marked 'complete', and does not optimize further.
//
// EXAMPLE:
// The object 'new short[2]' occupies 16 bytes in a 32-bit machine.
//   ctl = incoming control; mem* = incoming memory
// (Note:  A star * on a memory edge denotes I/O and other standard edges.)
// First allocate uninitialized memory and fill in the header:
//   alloc = (Allocate ctl mem* 16 #short[].klass ...)
//   ctl := alloc.Control; mem* := alloc.Memory*
//   rawmem = alloc.Memory; rawoop = alloc.RawAddress
// Then initialize to zero the non-header parts of the raw memory block:
//   init = (Initialize alloc.Control alloc.Memory* alloc.RawAddress)
//   ctl := init.Control; mem.SLICE(#short[*]) := init.Memory
// After the initialize node executes, the object is ready for service:
//   oop := (CheckCastPP init.Control alloc.RawAddress #short[])
// Suppose its body is immediately initialized as {1,2}:
//   store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
//   store2 = (StoreC init.Control store1      (+ oop 14) 2)
//   mem.SLICE(#short[*]) := store2
//
// DETAILS:
// An InitializeNode collects and isolates object initialization after
// an AllocateNode and before the next possible safepoint.  As a
// memory barrier (MemBarNode), it keeps critical stores from drifting
// down past any safepoint or any publication of the allocation.
// Before this barrier, a newly-allocated object may have uninitialized bits.
// After this barrier, it may be treated as a real oop, and GC is allowed.
//
// The semantics of the InitializeNode include an implicit zeroing of
// the new object from object header to the end of the object.
// (The object header and end are determined by the AllocateNode.)
//
// Certain stores may be added as direct inputs to the InitializeNode.
// These stores must update raw memory, and they must be to addresses
// derived from the raw address produced by AllocateNode, and with
// a constant offset.  They must be ordered by increasing offset.
// The first one is at in(RawStores), the last at in(req()-1).
// Unlike most memory operations, they are not linked in a chain,
// but are displayed in parallel as users of the rawmem output of
// the allocation.
//
// (See comments in InitializeNode::capture_store, which continue
// the example given above.)
//
// When the associated Allocate is macro-expanded, the InitializeNode
// may be rewritten to optimize collected stores.  A ClearArrayNode
// may also be created at that point to represent any required zeroing.
// The InitializeNode is then marked 'complete', prohibiting further
// capturing of nearby memory operations.
//
// During macro-expansion, all captured initializations which store
// constant values of 32 bits or smaller are coalesced (if advantageous)
// into larger 'tiles' of 32 or 64 bits.  This allows an object to be
// initialized in fewer memory operations.  Memory words which are
// covered by neither tiles nor non-constant stores are pre-zeroed
// by explicit stores of zero.  (The code shape happens to do all
// zeroing first, then all other stores, with both sequences occurring
// in order of ascending offsets.)
//
// Alternatively, code may be inserted between an AllocateNode and its
// InitializeNode, to perform arbitrary initialization of the new object.
// E.g., the object copying intrinsics insert complex data transfers here.
// The initialization must then be marked as 'complete' to disable the
// built-in zeroing semantics and the collection of initializing stores.
//
// While an InitializeNode is incomplete, reads from the memory state
// produced by it are optimizable if they match the control edge and
// new oop address associated with the allocation/initialization.
// They return a stored value (if the offset matches) or else zero.
// A write to the memory state, if it matches control and address,
// and if it is to a constant offset, may be 'captured' by the
// InitializeNode.  It is cloned as a raw memory operation and rewired
// inside the initialization, to the raw oop produced by the allocation.
// Operations on addresses which are provably distinct (e.g., to
// other AllocateNodes) are allowed to bypass the initialization.
//
// The effect of all this is to consolidate object initialization
// (both arrays and non-arrays, both piecewise and bulk) into a
// single location, where it can be optimized as a unit.
//
// Only stores with an offset less than TrackedInitializationLimit words
// will be considered for capture by an InitializeNode.  This puts a
// reasonable limit on the complexity of optimized initializations.

//---------------------------InitializeNode------------------------------------
InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop)
  : MemBarNode(C, adr_type, rawoop),
    _is_complete(Incomplete), _does_not_escape(false)
{
  init_class_id(Class_Initialize);

  assert(adr_type == Compile::AliasIdxRaw, "only valid atp");
  assert(in(RawAddress) == rawoop, "proper init");
  // Note:  allocation() can be NULL, for secondary initialization barriers
}

// Since this node is not matched, it will be processed by the
// register allocator.  Declare that there are no constraints
// on the allocation of the RawAddress edge.
const RegMask &InitializeNode::in_RegMask(uint idx) const {
  // This edge should be set to top, by the set_complete.  But be conservative.
  if (idx == InitializeNode::RawAddress)
    return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
  return RegMask::Empty;
}

Node* InitializeNode::memory(uint alias_idx) {
  Node* mem = in(Memory);
  if (mem->is_MergeMem()) {
    return mem->as_MergeMem()->memory_at(alias_idx);
  } else {
    // incoming raw memory is not split
    return mem;
  }
}

bool InitializeNode::is_non_zero() {
  if (is_complete())  return false;
  remove_extra_zeroes();
  return (req() > RawStores);
}

void InitializeNode::set_complete(PhaseGVN* phase) {
  assert(!is_complete(), "caller responsibility");
  _is_complete = Complete;

  // After this node is complete, it contains a bunch of
  // raw-memory initializations.  There is no need for
  // it to have anything to do with non-raw memory effects.
  // Therefore, tell all non-raw users to re-optimize themselves,
  // after skipping the memory effects of this initialization.
  PhaseIterGVN* igvn = phase->is_IterGVN();
  if (igvn)  igvn->add_users_to_worklist(this);
}

// convenience function
// return false if the init contains any stores already
bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
  InitializeNode* init = initialization();
  if (init == NULL || init->is_complete())  return false;
  init->remove_extra_zeroes();
  // for now, if this allocation has already collected any inits, bail:
  if (init->is_non_zero())  return false;
  init->set_complete(phase);
  return true;
}

void InitializeNode::remove_extra_zeroes() {
  if (req() == RawStores)  return;
  Node* zmem = zero_memory();
  uint fill = RawStores;
  for (uint i = fill; i < req(); i++) {
    Node* n = in(i);
    if (n->is_top() || n == zmem)  continue;  // skip
    if (fill < i)  set_req(fill, n);          // compact
    ++fill;
  }
  // delete any empty spaces created:
  while (fill < req()) {
    del_req(fill);
  }
}

// Helper for remembering which stores go with which offsets.
intptr_t InitializeNode::get_store_offset(Node* st, PhaseTransform* phase) {
  if (!st->is_Store())  return -1;  // can happen to dead code via subsume_node
  intptr_t offset = -1;
  Node* base = AddPNode::Ideal_base_and_offset(st->in(MemNode::Address),
                                               phase, offset);
  if (base == NULL)  return -1;  // something is dead,
  if (offset < 0)    return -1;  //  dead, dead
  return offset;
}
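
// Illustrative example (based on the running example in the InitializeNode
// comment block above): a captured
//   store2 = (StoreC init.Control store1 (+ oop 14) 2)
// has its address decomposed by AddPNode::Ideal_base_and_offset(), so
// get_store_offset() would report 14 for it.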
|
3472 |
||
3473 |
// Helper for proving that an initialization expression is |
|
3474 |
// "simple enough" to be folded into an object initialization. |
|
3475 |
// Attempts to prove that a store's initial value 'n' can be captured |
|
3476 |
// within the initialization without creating a vicious cycle, such as: |
|
3477 |
// { Foo p = new Foo(); p.next = p; } |
|
3478 |
// True for constants and parameters and small combinations thereof. |
|
17383 | 3479 |
bool InitializeNode::detect_init_independence(Node* n, int& count) { |
1 | 3480 |
if (n == NULL) return true; // (can this really happen?) |
3481 |
if (n->is_Proj()) n = n->in(0); |
|
3482 |
if (n == this) return false; // found a cycle |
|
3483 |
if (n->is_Con()) return true; |
|
3484 |
if (n->is_Start()) return true; // params, etc., are OK |
|
3485 |
if (n->is_Root()) return true; // even better |
|
3486 |
||
3487 |
Node* ctl = n->in(0); |
|
3488 |
if (ctl != NULL && !ctl->is_top()) { |
|
3489 |
if (ctl->is_Proj()) ctl = ctl->in(0); |
|
3490 |
if (ctl == this) return false; |
|
3491 |
||
3492 |
// If we already know that the enclosing memory op is pinned right after |
|
3493 |
// the init, then any control flow that the store has picked up |
|
3494 |
// must have preceded the init, or else be equal to the init. |
|
3495 |
// Even after loop optimizations (which might change control edges) |
|
3496 |
// a store is never pinned *before* the availability of its inputs. |
|
366
449c27706bac
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
360
diff
changeset
|
3497 |
if (!MemNode::all_controls_dominate(n, this)) |
1 | 3498 |
return false; // failed to prove a good control |
3499 |
} |
|
3500 |
||
3501 |
// Check data edges for possible dependencies on 'this'. |
|
3502 |
if ((count += 1) > 20) return false; // complexity limit |
|
3503 |
for (uint i = 1; i < n->req(); i++) { |
|
3504 |
Node* m = n->in(i); |
|
3505 |
if (m == NULL || m == n || m->is_top()) continue; |
|
3506 |
uint first_i = n->find_edge(m); |
|
3507 |
if (i != first_i) continue; // process duplicate edge just once |
|
17383 | 3508 |
if (!detect_init_independence(m, count)) { |
1 | 3509 |
return false; |
3510 |
} |
|
3511 |
} |
|
3512 |
||
3513 |
return true; |
|
3514 |
} |
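
// Illustrative Java-level sketch (not from the original comments): the
// recursion accepts values built from constants and incoming parameters,
// and rejects any value that can reach the new object itself.
//   Foo p = new Foo();
//   p.count = n + 1;    // capturable: parameter plus constant
//   p.next  = p;        // rejected:   stored value is the allocation (cycle)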

// Here are all the checks a Store must pass before it can be moved into
// an initialization.  Returns zero if a check fails.
// On success, returns the (constant) offset to which the store applies,
// within the initialized memory.
intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) {
  const int FAIL = 0;
  if (st->is_unaligned_access()) {
    return FAIL;
  }
  if (st->req() != MemNode::ValueIn + 1)
    return FAIL;                // an inscrutable StoreNode (card mark?)
  Node* ctl = st->in(MemNode::Control);
  if (!(ctl != NULL && ctl->is_Proj() && ctl->in(0) == this))
    return FAIL;                // must be unconditional after the initialization
  Node* mem = st->in(MemNode::Memory);
  if (!(mem->is_Proj() && mem->in(0) == this))
    return FAIL;                // must not be preceded by other stores
  Node* adr = st->in(MemNode::Address);
  intptr_t offset;
  AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset);
  if (alloc == NULL)
    return FAIL;                // inscrutable address
  if (alloc != allocation())
    return FAIL;                // wrong allocation!  (store needs to float up)
  Node* val = st->in(MemNode::ValueIn);
  int complexity_count = 0;
  if (!detect_init_independence(val, complexity_count))
    return FAIL;                // stored value must be 'simple enough'

  // The Store can be captured only if nothing after the allocation
  // and before the Store is using the memory location that the store
  // overwrites.
  bool failed = false;
  // If is_complete_with_arraycopy() is true the shape of the graph is
  // well defined and is safe so no need for extra checks.
  if (!is_complete_with_arraycopy()) {
    // We are going to look at each use of the memory state following
    // the allocation to make sure nothing reads the memory that the
    // Store writes.
    const TypePtr* t_adr = phase->type(adr)->isa_ptr();
    int alias_idx = phase->C->get_alias_index(t_adr);
    ResourceMark rm;
    Unique_Node_List mems;
    mems.push(mem);
    Node* unique_merge = NULL;
    for (uint next = 0; next < mems.size(); ++next) {
      Node* m = mems.at(next);
      for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
        Node* n = m->fast_out(j);
        if (n->outcnt() == 0) {
          continue;
        }
        if (n == st) {
          continue;
        } else if (n->in(0) != NULL && n->in(0) != ctl) {
          // If the control of this use is different from the control
          // of the Store which is right after the InitializeNode then
          // this node cannot be between the InitializeNode and the
          // Store.
          continue;
        } else if (n->is_MergeMem()) {
          if (n->as_MergeMem()->memory_at(alias_idx) == m) {
            // We can hit a MergeMemNode (that will likely go away
            // later) that is a direct use of the memory state
            // following the InitializeNode on the same slice as the
            // store node that we'd like to capture. We need to check
            // the uses of the MergeMemNode.
            mems.push(n);
          }
        } else if (n->is_Mem()) {
          Node* other_adr = n->in(MemNode::Address);
          if (other_adr == adr) {
            failed = true;
            break;
          } else {
            const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
            if (other_t_adr != NULL) {
              int other_alias_idx = phase->C->get_alias_index(other_t_adr);
              if (other_alias_idx == alias_idx) {
                // A load from the same memory slice as the store right
                // after the InitializeNode. We check the control of the
                // object/array that is loaded from. If it's the same as
                // the store control then we cannot capture the store.
                assert(!n->is_Store(), "2 stores to same slice on same control?");
                Node* base = other_adr;
                assert(base->is_AddP(), "should be addp but is %s", base->Name());
                base = base->in(AddPNode::Base);
                if (base != NULL) {
                  base = base->uncast();
                  if (base->is_Proj() && base->in(0) == alloc) {
                    failed = true;
                    break;
                  }
                }
              }
            }
          }
        } else {
          failed = true;
          break;
        }
      }
    }
    if (failed) {
      if (!can_reshape) {
        // We decided we couldn't capture the store during parsing. We
        // should try again during the next IGVN once the graph is
        // cleaner.
        phase->C->record_for_igvn(st);
      }
      return FAIL;
    }
  }

  return offset;                // success
}
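
// A hedged Java-level sketch of the dependent-load hazard this scan guards
// against (illustrative only, not from the original comments):
//   Foo p = new Foo();   // p.x starts out zero
//   int v = p.x;         // this load must still observe the zero...
//   p.x = 42;            // ...so the store cannot be captured past it
// Capturing the store into the initialization would let the load see 42.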

// Find the captured store in(i) which corresponds to the range
// [start..start+size) in the initialized object.
// If there is one, return its index i.  If there isn't, return the
// negative of the index where it should be inserted.
// Return 0 if the queried range overlaps an initialization boundary
// or if dead code is encountered.
// If size_in_bytes is zero, do not bother with overlap checks.
int InitializeNode::captured_store_insertion_point(intptr_t start,
                                                   int size_in_bytes,
                                                   PhaseTransform* phase) {
  const int FAIL = 0, MAX_STORE = BytesPerLong;

  if (is_complete())
    return FAIL;                // arraycopy got here first; punt

  assert(allocation() != NULL, "must be present");

  // no negatives, no header fields:
  if (start < (intptr_t) allocation()->minimum_header_size())  return FAIL;

  // after a certain size, we bail out on tracking all the stores:
  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
  if (start >= ti_limit)  return FAIL;

  for (uint i = InitializeNode::RawStores, limit = req(); ; ) {
    if (i >= limit)  return -(int)i;  // not found; here is where to put it

    Node*    st     = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0) {
      if (st != zero_memory()) {
        return FAIL;            // bail out if there is dead garbage
      }
    } else if (st_off > start) {
      // ...we are done, since stores are ordered
      if (st_off < start + size_in_bytes) {
        return FAIL;            // the next store overlaps
      }
      return -(int)i;           // not found; here is where to put it
    } else if (st_off < start) {
      if (size_in_bytes != 0 &&
          start < st_off + MAX_STORE &&
          start < st_off + st->as_Store()->memory_size()) {
        return FAIL;            // the previous store overlaps
      }
    } else {
      if (size_in_bytes != 0 &&
          st->as_Store()->memory_size() != size_in_bytes) {
        return FAIL;            // mismatched store size
      }
      return i;
    }

    ++i;
  }
}
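
// Worked example (hypothetical offsets, for illustration only): with captured
// 4-byte stores at offsets 12 and 16, querying [16..20) returns the index of
// the store at 16; querying [20..24) returns the negated index where a new
// store belongs; querying [14..18) returns 0 because the range overlaps.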

// Look for a captured store which initializes at the offset 'start'
// with the given size.  If there is no such store, and no other
// initialization interferes, then return zero_memory (the memory
// projection of the AllocateNode).
Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes,
                                          PhaseTransform* phase) {
  assert(stores_are_sane(phase), "");
  int i = captured_store_insertion_point(start, size_in_bytes, phase);
  if (i == 0) {
    return NULL;                // something is dead
  } else if (i < 0) {
    return zero_memory();       // just primordial zero bits here
  } else {
    Node* st = in(i);           // here is the store at this position
    assert(get_store_offset(st->as_Store(), phase) == start, "sanity");
    return st;
  }
}

// Create, as a raw pointer, an address within my new object at 'offset'.
Node* InitializeNode::make_raw_address(intptr_t offset,
                                       PhaseTransform* phase) {
  Node* addr = in(RawAddress);
  if (offset != 0) {
    Compile* C = phase->C;
    addr = phase->transform( new AddPNode(C->top(), addr,
                                          phase->MakeConX(offset)) );
  }
  return addr;
}

// Clone the given store, converting it into a raw store
// initializing a field or element of my new object.
// Caller is responsible for retiring the original store,
// with subsume_node or the like.
//
// From the example above InitializeNode::InitializeNode,
// here are the old stores to be captured:
//   store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
//   store2 = (StoreC init.Control store1      (+ oop 14) 2)
//
// Here is the changed code; note the extra edges on init:
//   alloc = (Allocate ...)
//   rawoop = alloc.RawAddress
//   rawstore1 = (StoreC alloc.Control alloc.Memory (+ rawoop 12) 1)
//   rawstore2 = (StoreC alloc.Control alloc.Memory (+ rawoop 14) 2)
//   init = (Initialize alloc.Control alloc.Memory rawoop
//                      rawstore1 rawstore2)
//
Node* InitializeNode::capture_store(StoreNode* st, intptr_t start,
                                    PhaseTransform* phase, bool can_reshape) {
  assert(stores_are_sane(phase), "");

  if (start < 0)  return NULL;
  assert(can_capture_store(st, phase, can_reshape) == start, "sanity");

  Compile* C = phase->C;
  int size_in_bytes = st->memory_size();
  int i = captured_store_insertion_point(start, size_in_bytes, phase);
  if (i == 0)  return NULL;     // bail out
  Node* prev_mem = NULL;        // raw memory for the captured store
  if (i > 0) {
    prev_mem = in(i);           // there is a pre-existing store under this one
    set_req(i, C->top());       // temporarily disconnect it
    // See StoreNode::Ideal 'st->outcnt() == 1' for the reason to disconnect.
  } else {
    i = -i;                     // no pre-existing store
    prev_mem = zero_memory();   // a slice of the newly allocated object
    if (i > InitializeNode::RawStores && in(i-1) == prev_mem)
      set_req(--i, C->top());   // reuse this edge; it has been folded away
    else
      ins_req(i, C->top());     // build a new edge
  }
  Node* new_st = st->clone();
  new_st->set_req(MemNode::Control, in(Control));
  new_st->set_req(MemNode::Memory,  prev_mem);
  new_st->set_req(MemNode::Address, make_raw_address(start, phase));
  new_st = phase->transform(new_st);

  // At this point, new_st might have swallowed a pre-existing store
  // at the same offset, or perhaps new_st might have disappeared,
  // if it redundantly stored the same value (or zero to fresh memory).

  // In any case, wire it in:
  phase->igvn_rehash_node_delayed(this);
  set_req(i, new_st);

  // The caller may now kill the old guy.
  DEBUG_ONLY(Node* check_st = find_captured_store(start, size_in_bytes, phase));
  assert(check_st == new_st || check_st == NULL, "must be findable");
  assert(!is_complete(), "");
  return new_st;
}

static bool store_constant(jlong* tiles, int num_tiles,
                           intptr_t st_off, int st_size,
                           jlong con) {
  if ((st_off & (st_size-1)) != 0)
    return false;               // strange store offset (assume size==2**N)
  address addr = (address)tiles + st_off;
  assert(st_off >= 0 && addr+st_size <= (address)&tiles[num_tiles], "oob");
  switch (st_size) {
  case sizeof(jbyte):  *(jbyte*) addr = (jbyte) con; break;
  case sizeof(jchar):  *(jchar*) addr = (jchar) con; break;
  case sizeof(jint):   *(jint*)  addr = (jint)  con; break;
  case sizeof(jlong):  *(jlong*) addr = (jlong) con; break;
  default: return false;        // strange store size (detect size!=2**N here)
  }
  return true;                  // return success to caller
}
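
// A minimal usage sketch (hypothetical values; little-endian layout assumed):
//   jlong tiles[2] = { 0, 0 };
//   store_constant(tiles, 2, 8, sizeof(jint), 7);   // ok: fills the low word of tiles[1]
//   store_constant(tiles, 2, 14, sizeof(jint), 7);  // false: 14 is not 4-byte aligned
// Offsets are byte offsets into the tile array, so offset 8 is the start of tiles[1].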

// Coalesce subword constants into int constants and possibly
// into long constants.  The goal, if the CPU permits,
// is to initialize the object with a small number of 64-bit tiles.
// Also, convert floating-point constants to bit patterns.
// Non-constants are not relevant to this pass.
//
// In terms of the running example on InitializeNode::InitializeNode
// and InitializeNode::capture_store, here is the transformation
// of rawstore1 and rawstore2 into rawstore12:
//   alloc = (Allocate ...)
//   rawoop = alloc.RawAddress
//   tile12 = 0x00010002
//   rawstore12 = (StoreI alloc.Control alloc.Memory (+ rawoop 12) tile12)
//   init = (Initialize alloc.Control alloc.Memory rawoop rawstore12)
//
void
InitializeNode::coalesce_subword_stores(intptr_t header_size,
                                        Node* size_in_bytes,
                                        PhaseGVN* phase) {
  Compile* C = phase->C;

  assert(stores_are_sane(phase), "");
  // Note:  After this pass, they are not completely sane,
  // since there may be some overlaps.

  int old_subword = 0, old_long = 0, new_int = 0, new_long = 0;

  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
  intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit);
  size_limit = MIN2(size_limit, ti_limit);
  size_limit = align_up(size_limit, BytesPerLong);
  int num_tiles = size_limit / BytesPerLong;

  // allocate space for the tile map:
  const int small_len = DEBUG_ONLY(true ? 3 :) 30; // keep stack frames small
  jlong  tiles_buf[small_len];
  Node*  nodes_buf[small_len];
  jlong  inits_buf[small_len];
  jlong* tiles = ((num_tiles <= small_len) ? &tiles_buf[0]
                  : NEW_RESOURCE_ARRAY(jlong, num_tiles));
  Node** nodes = ((num_tiles <= small_len) ? &nodes_buf[0]
                  : NEW_RESOURCE_ARRAY(Node*, num_tiles));
  jlong* inits = ((num_tiles <= small_len) ? &inits_buf[0]
                  : NEW_RESOURCE_ARRAY(jlong, num_tiles));
  // tiles: exact bitwise model of all primitive constants
  // nodes: last constant-storing node subsumed into the tiles model
  // inits: which bytes (in each tile) are touched by any initializations

  //// Pass A: Fill in the tile model with any relevant stores.

  Copy::zero_to_bytes(tiles, sizeof(tiles[0]) * num_tiles);
  Copy::zero_to_bytes(nodes, sizeof(nodes[0]) * num_tiles);
  Copy::zero_to_bytes(inits, sizeof(inits[0]) * num_tiles);
  Node* zmem = zero_memory(); // initially zero memory state
  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);

    // Figure out the store's offset and constant value:
    if (st_off < header_size)             continue; // skip (ignore header)
    if (st->in(MemNode::Memory) != zmem)  continue; // skip (odd store chain)
    int st_size = st->as_Store()->memory_size();
    if (st_off + st_size > size_limit)    break;

    // Record which bytes are touched, whether by constant or not.
    if (!store_constant(inits, num_tiles, st_off, st_size, (jlong) -1))
      continue;                 // skip (strange store size)

    const Type* val = phase->type(st->in(MemNode::ValueIn));
    if (!val->singleton())                continue; // skip (non-con store)
    BasicType type = val->basic_type();

    jlong con = 0;
    switch (type) {
    case T_INT:    con = val->is_int()->get_con();  break;
    case T_LONG:   con = val->is_long()->get_con(); break;
    case T_FLOAT:  con = jint_cast(val->getf());    break;
    case T_DOUBLE: con = jlong_cast(val->getd());   break;
    default:                              continue; // skip (odd store type)
    }

    if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
        st->Opcode() == Op_StoreL) {
      continue;                 // This StoreL is already optimal.
    }

    // Store down the constant.
    store_constant(tiles, num_tiles, st_off, st_size, con);

    intptr_t j = st_off >> LogBytesPerLong;

    if (type == T_INT && st_size == BytesPerInt
        && (st_off & BytesPerInt) == BytesPerInt) {
      jlong lcon = tiles[j];
      if (!Matcher::isSimpleConstant64(lcon) &&
          st->Opcode() == Op_StoreI) {
        // This StoreI is already optimal by itself.
        jint* intcon = (jint*) &tiles[j];
        intcon[1] = 0;          // undo the store_constant()

        // If the previous store is also optimal by itself, back up and
        // undo the action of the previous loop iteration... if we can.
        // But if we can't, just let the previous half take care of itself.
        st = nodes[j];
        st_off -= BytesPerInt;
        con = intcon[0];
        if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) {
          assert(st_off >= header_size, "still ignoring header");
          assert(get_store_offset(st, phase) == st_off, "must be");
          assert(in(i-1) == zmem, "must be");
          DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn)));
          assert(con == tcon->is_int()->get_con(), "must be");
          // Undo the effects of the previous loop trip, which swallowed st:
          intcon[0] = 0;        // undo store_constant()
          set_req(i-1, st);     // undo set_req(i, zmem)
          nodes[j] = NULL;      // undo nodes[j] = st
          --old_subword;        // undo ++old_subword
        }
        continue;               // This StoreI is already optimal.
      }
    }

    // This store is not needed.
    set_req(i, zmem);
    nodes[j] = st;              // record for the moment
    if (st_size < BytesPerLong) // something has changed
      ++old_subword;            // includes int/float, but who's counting...
    else  ++old_long;
  }

  if ((old_subword + old_long) == 0)
    return;                     // nothing more to do

  //// Pass B: Convert any non-zero tiles into optimal constant stores.
  // Be sure to insert them before overlapping non-constant stores.
  // (E.g., byte[] x = { 1,2,y,4 }  =>  x[int 0] = 0x01020004, x[2]=y.)
  for (int j = 0; j < num_tiles; j++) {
    jlong con  = tiles[j];
    jlong init = inits[j];
    if (con == 0)  continue;
    jint con0,  con1;           // split the constant, address-wise
    jint init0, init1;          // split the init map, address-wise
    { union { jlong con; jint intcon[2]; } u;
      u.con = con;
      con0  = u.intcon[0];
      con1  = u.intcon[1];
      u.con = init;
      init0 = u.intcon[0];
      init1 = u.intcon[1];
    }

    Node* old = nodes[j];
    assert(old != NULL, "need the prior store");
    intptr_t offset = (j * BytesPerLong);

    bool split = !Matcher::isSimpleConstant64(con);

    if (offset < header_size) {
      assert(offset + BytesPerInt >= header_size, "second int counts");
      assert(*(jint*)&tiles[j] == 0, "junk in header");
      split = true;             // only the second word counts
      // Example:  int a[] = { 42 ... }
    } else if (con0 == 0 && init0 == -1) {
      split = true;             // first word is covered by full inits
      // Example:  int a[] = { ... foo(), 42 ... }
    } else if (con1 == 0 && init1 == -1) {
      split = true;             // second word is covered by full inits
      // Example:  int a[] = { ... 42, foo() ... }
    }

    // Here's a case where init0 is neither 0 nor -1:
    //   byte a[] = { ... 0,0,foo(),0,  0,0,0,42 ... }
    // Assuming big-endian memory, init0, init1 are 0x0000FF00, 0x000000FF.
    // In this case the tile is not split; it is (jlong)42.
    // The big tile is stored down, and then the foo() value is inserted.
    // (If there were foo(),foo() instead of foo(),0, init0 would be -1.)

    Node* ctl = old->in(MemNode::Control);
    Node* adr = make_raw_address(offset, phase);
    const TypePtr* atp = TypeRawPtr::BOTTOM;

    // One or two coalesced stores to plop down.
    Node*    st[2];
    intptr_t off[2];
    int  nst = 0;
    if (!split) {
      ++new_long;
      off[nst] = offset;
      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                  phase->longcon(con), T_LONG, MemNode::unordered);
    } else {
      // Omit either if it is a zero.
      if (con0 != 0) {
        ++new_int;
        off[nst]  = offset;
        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                    phase->intcon(con0), T_INT, MemNode::unordered);
      }
      if (con1 != 0) {
        ++new_int;
        offset += BytesPerInt;
        adr = make_raw_address(offset, phase);
        off[nst]  = offset;
        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                    phase->intcon(con1), T_INT, MemNode::unordered);
      }
    }

    // Insert second store first, then the first before the second.
    // Insert each one just before any overlapping non-constant stores.
    while (nst > 0) {
      Node* st1 = st[--nst];
      C->copy_node_notes_to(st1, old);
      st1 = phase->transform(st1);
      offset = off[nst];
      assert(offset >= header_size, "do not smash header");
      int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase);
      guarantee(ins_idx != 0, "must re-insert constant store");
      if (ins_idx < 0)  ins_idx = -ins_idx;  // never overlap
      if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem)
        set_req(--ins_idx, st1);
      else
        ins_req(ins_idx, st1);
    }
  }

  if (PrintCompilation && WizardMode)
    tty->print_cr("Changed %d/%d subword/long constants into %d/%d int/long",
                  old_subword, old_long, new_int, new_long);
  if (C->log() != NULL)
    C->log()->elem("comment that='%d/%d subword/long to %d/%d int/long'",
                   old_subword, old_long, new_int, new_long);

  // Clean up any remaining occurrences of zmem:
  remove_extra_zeroes();
}
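
// Worked example (hypothetical, little-endian byte order assumed): for
//   byte[] b = { 1, 2, 3, 4 };
// the four one-byte constant stores land in one tile, whose low word ends up
// as 0x04030201; Pass B then replaces them with a single StoreI of that
// constant at the offset of b[0].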

// Explore forward from in(start) to find the first fully initialized
// word, and return its offset.  Skip groups of subword stores which
// together initialize full words.  If in(start) is itself part of a
// fully initialized word, return the offset of in(start).  If there
// are no following full-word stores, or if something is fishy, return
// a negative value.
intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) {
  int       int_map = 0;
  intptr_t  int_map_off = 0;
  const int FULL_MAP = right_n_bits(BytesPerInt);  // the int_map we hope for

  for (uint i = start, limit = req(); i < limit; i++) {
    Node* st = in(i);

    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)  break;     // return conservative answer

    int st_size = st->as_Store()->memory_size();
    if (st_size >= BytesPerInt && (st_off % BytesPerInt) == 0) {
      return st_off;            // we found a complete word init
    }

    // update the map:

    intptr_t this_int_off = align_down(st_off, BytesPerInt);
    if (this_int_off != int_map_off) {
      // reset the map:
      int_map = 0;
      int_map_off = this_int_off;
    }

    int subword_off = st_off - this_int_off;
    int_map |= right_n_bits(st_size) << subword_off;
    if ((int_map & FULL_MAP) == FULL_MAP) {
      return this_int_off;      // we found a complete word init
    }

    // Did this store hit or cross the word boundary?
    intptr_t next_int_off = align_down(st_off + st_size, BytesPerInt);
    if (next_int_off == this_int_off + BytesPerInt) {
      // We passed the current int, without fully initializing it.
      int_map_off = next_int_off;
      int_map >>= BytesPerInt;
    } else if (next_int_off > this_int_off + BytesPerInt) {
      // We passed the current and next int.
      return this_int_off + BytesPerInt;
    }
  }

  return -1;
}
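
// Hypothetical trace (illustration only): four byte stores at offsets
// 12, 13, 14, 15 grow int_map through 0x1, 0x3, 0x7 to 0xF == FULL_MAP,
// so the function reports a fully initialized word at offset 12.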


// Called when the associated AllocateNode is expanded into CFG.
// At this point, we may perform additional optimizations.
// Linearize the stores by ascending offset, to make memory
// activity as coherent as possible.
Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                                      intptr_t header_size,
                                      Node* size_in_bytes,
                                      PhaseGVN* phase) {
  assert(!is_complete(), "not already complete");
  assert(stores_are_sane(phase), "");
  assert(allocation() != NULL, "must be present");

  remove_extra_zeroes();

  if (ReduceFieldZeroing || ReduceBulkZeroing)
    // reduce instruction count for common initialization patterns
    coalesce_subword_stores(header_size, size_in_bytes, phase);

  Node* zmem = zero_memory();   // initially zero memory state
  Node* inits = zmem;           // accumulating a linearized chain of inits
#ifdef ASSERT
  intptr_t first_offset = allocation()->minimum_header_size();
  intptr_t last_init_off = first_offset;  // previous init offset
  intptr_t last_init_end = first_offset;  // previous init offset+size
  intptr_t last_tile_end = first_offset;  // previous tile offset+size
#endif
  intptr_t zeroes_done = header_size;

  bool do_zeroing = true;       // we might give up if inits are very sparse
  int  big_init_gaps = 0;       // how many large gaps have we seen?

  if (UseTLAB && ZeroTLAB)  do_zeroing = false;
  if (!ReduceFieldZeroing && !ReduceBulkZeroing)  do_zeroing = false;

  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)
      break;                    // unknown junk in the inits
    if (st->in(MemNode::Memory) != zmem)
      break;                    // complicated store chains somehow in list

    int st_size = st->as_Store()->memory_size();
    intptr_t next_init_off = st_off + st_size;

    if (do_zeroing && zeroes_done < next_init_off) {
      // See if this store needs a zero before it or under it.
      intptr_t zeroes_needed = st_off;

      if (st_size < BytesPerInt) {
        // Look for subword stores which only partially initialize words.
        // If we find some, we must lay down some word-level zeroes first,
        // underneath the subword stores.
        //
        // Examples:
        //   byte[] a = { p,q,r,s }  =>  a[0]=p,a[1]=q,a[2]=r,a[3]=s
        //   byte[] a = { x,y,0,0 }  =>  a[0..3] = 0, a[0]=x,a[1]=y
        //   byte[] a = { 0,0,z,0 }  =>  a[0..3] = 0, a[2]=z
        //
        // Note:  coalesce_subword_stores may have already done this,
        // if it was prompted by constant non-zero subword initializers.
        // But this case can still arise with non-constant stores.

        intptr_t next_full_store = find_next_fullword_store(i, phase);

        // In the examples above:
        //   in(i)          p   q   r   s     x   y     z
        //   st_off        12  13  14  15    12  13    14
        //   st_size        1   1   1   1     1   1     1
        //   next_full_s.  12  16  16  16    16  16    16
        //   z's_done      12  16  16  16    12  16    12
        //   z's_needed    12  16  16  16    16  16    16
        //   zsize          0   0   0   0     4   0     4
        if (next_full_store < 0) {
          // Conservative tack:  Zero to end of current word.
          zeroes_needed = align_up(zeroes_needed, BytesPerInt);
        } else {
          // Zero to beginning of next fully initialized word.
          // Or, don't zero at all, if we are already in that word.
          assert(next_full_store >= zeroes_needed, "must go forward");
          assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
          zeroes_needed = next_full_store;
        }
      }

      if (zeroes_needed > zeroes_done) {
        intptr_t zsize = zeroes_needed - zeroes_done;
        // Do some incremental zeroing on rawmem, in parallel with inits.
        zeroes_done = align_down(zeroes_done, BytesPerInt);
        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                              zeroes_done, zeroes_needed,
                                              phase);
        zeroes_done = zeroes_needed;
        if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
          do_zeroing = false;   // leave the hole, next time
      }
    }

    // Collect the store and move on:
    st->set_req(MemNode::Memory, inits);
    inits = st;                 // put it on the linearized chain
    set_req(i, zmem);           // unhook from previous position

    if (zeroes_done == st_off)
      zeroes_done = next_init_off;

    assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");

#ifdef ASSERT
    // Various order invariants.  Weaker than stores_are_sane because
    // a large constant tile can be filled in by smaller non-constant stores.
    assert(st_off >= last_init_off, "inits do not reverse");
    last_init_off = st_off;
    const Type* val = NULL;
    if (st_size >= BytesPerInt &&
        (val = phase->type(st->in(MemNode::ValueIn)))->singleton() &&
        (int)val->basic_type() < (int)T_OBJECT) {
      assert(st_off >= last_tile_end, "tiles do not overlap");
      assert(st_off >= last_init_end, "tiles do not overwrite inits");
      last_tile_end = MAX2(last_tile_end, next_init_off);
    } else {
      intptr_t st_tile_end = align_up(next_init_off, BytesPerLong);
      assert(st_tile_end >= last_tile_end, "inits stay with tiles");
      assert(st_off      >= last_init_end, "inits do not overlap");
      last_init_end = next_init_off;  // it's a non-tile
    }
#endif //ASSERT
  }

  remove_extra_zeroes();        // clear out all the zmems left over
  add_req(inits);

  if (!(UseTLAB && ZeroTLAB)) {
    // If anything remains to be zeroed, zero it all now.
    zeroes_done = align_down(zeroes_done, BytesPerInt);
    // if it is the last unused 4 bytes of an instance, forget about it
    intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
    if (zeroes_done + BytesPerLong >= size_limit) {
      AllocateNode* alloc = allocation();
      assert(alloc != NULL, "must be present");
      if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
        Node* klass_node = alloc->in(AllocateNode::KlassNode);
        ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
        if (zeroes_done == k->layout_helper())
          zeroes_done = size_limit;
      }
    }
    if (zeroes_done < size_limit) {
      rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                            zeroes_done, size_in_bytes, phase);
    }
  }

  set_complete(phase);
  return rawmem;
}
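
// End-to-end sketch (hypothetical Java source, for orientation only):
//   Point p = new Point(); p.x = 1; p.y = 2;
// expands into the allocation, a linearized chain of captured raw stores for
// x and y, and at most a ClearArray for any bytes the stores left untouched.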


#ifdef ASSERT
bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
  if (is_complete())
    return true;                // stores could be anything at this point
  assert(allocation() != NULL, "must be present");
  intptr_t last_off = allocation()->minimum_header_size();
  for (uint i = InitializeNode::RawStores; i < req(); i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)  continue;  // ignore dead garbage
    if (last_off > st_off) {
      tty->print_cr("*** bad store offset at %d: " INTX_FORMAT " > " INTX_FORMAT, i, last_off, st_off);
      this->dump(2);
      assert(false, "ascending store offsets");
      return false;
    }
    last_off = st_off + st->as_Store()->memory_size();
  }
  return true;
}
#endif //ASSERT
|
4270 |
||
4271 |
||
4272 |
||
4273 |
||
4274 |
//============================MergeMemNode===================================== |
|
//
// SEMANTICS OF MEMORY MERGES:  A MergeMem is a memory state assembled from several
// contributing store or call operations.  Each contributor provides the memory
// state for a particular "alias type" (see Compile::alias_type).  For example,
// if a MergeMem has an input X for alias category #6, then any memory reference
// to alias category #6 may use X as its memory state input, as an exact equivalent
// to using the MergeMem as a whole.
//   Load<6>( MergeMem(<6>: X, ...), p ) <==> Load<6>(X,p)
//
// (Here, the <N> notation gives the index of the relevant adr_type.)
//
// In one special case (and more cases in the future), alias categories overlap.
// The special alias category "Bot" (Compile::AliasIdxBot) includes all memory
// states.  Therefore, if a MergeMem has only one contributing input W for Bot,
// it is exactly equivalent to that state W:
//   MergeMem(<Bot>: W) <==> W
//
// Usually, the merge has more than one input.  In that case, where inputs
// overlap (i.e., one is Bot), the narrower alias type determines the memory
// state for that type, and the wider alias type (Bot) fills in everywhere else:
//   Load<5>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<5>(W,p)
//   Load<6>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<6>(X,p)
//
// A merge can take a "wide" memory state as one of its narrow inputs.
// This simply means that the merge observes only the relevant parts of
// the wide input.  That is, wide memory states arriving at narrow merge inputs
// are implicitly "filtered" or "sliced" as necessary.  (This is rare.)
//
// These rules imply that MergeMem nodes may cascade (via their <Bot> links),
// and that memory slices "leak through":
//   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y)) <==> MergeMem(<Bot>: W, <7>: Y)
//
// But, in such a cascade, repeated memory slices can "block the leak":
//   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y), <7>: Y') <==> MergeMem(<Bot>: W, <7>: Y')
//
// In the last example, Y is not part of the combined memory state of the
// outermost MergeMem.  The system must, of course, prevent unschedulable
// memory states from arising, so you can be sure that the state Y is somehow
// a precursor to state Y'.
//
//
// REPRESENTATION OF MEMORY MERGES: The indexes used to address the Node::in array
// of each MergeMemNode are exactly the numerical alias indexes, including
// but not limited to AliasIdxTop, AliasIdxBot, and AliasIdxRaw.  The functions
// Compile::alias_type (and kin) produce and manage these indexes.
//
// By convention, the value of in(AliasIdxTop) (i.e., in(1)) is always the top node.
// (Note that this provides quick access to the top node inside MergeMem methods,
// without the need to reach out via TLS to Compile::current.)
//
// As a consequence of what was just described, a MergeMem that represents a full
// memory state has an edge in(AliasIdxBot) which is a "wide" memory state,
// containing all alias categories.
//
// MergeMem nodes never (?) have control inputs, so in(0) is NULL.
//
// All other edges in(N) (including in(AliasIdxRaw), which is in(3)) are either
// a memory state for the alias type <N>, or else the top node, meaning that
// there is no particular input for that alias type.  Note that the length of
// a MergeMem is variable, and may be extended at any time to accommodate new
// memory states at larger alias indexes.  When merges grow, they are of course
// filled with "top" in the unused in() positions.
//
// This use of top is named "empty_memory()", or "empty_mem" (no-memory) as a variable.
// (Top was chosen because it works smoothly with passes like GCM.)
//
// For convenience, we hardwire the alias index for TypeRawPtr::BOTTOM.  (It is
// the type of random VM bits like TLS references.)  Since it is always the
// first non-Bot memory slice, some low-level loops use it to initialize an
// index variable:  for (i = AliasIdxRaw; i < req(); i++).
//
//
// ACCESSORS:  There is a special accessor MergeMemNode::base_memory which returns
// the distinguished "wide" state.  The accessor MergeMemNode::memory_at(N) returns
// the memory state for alias type <N>, or, if there is no particular slice at <N>,
// the base memory.  To prevent bugs, memory_at does not accept <Top>
// or <Bot> indexes.  The iterator MergeMemStream provides robust iteration over
// MergeMem nodes or pairs of such nodes, ensuring that the non-top edges are visited.
//
// %%%% We may get rid of base_memory as a separate accessor at some point; it isn't
// really that different from the other memory inputs.  An abbreviation called
// "bot_memory()" for "memory_at(AliasIdxBot)" would keep code tidy.
//
//
// PARTIAL MEMORY STATES:  During optimization, MergeMem nodes may arise that represent
// partial memory states.  When a Phi splits through a MergeMem, the copy of the Phi
// that "emerges through" the base memory will be marked as excluding the alias types
// of the other (narrow-memory) copies which "emerged through" the narrow edges:
//
//   Phi<Bot>(U, MergeMem(<Bot>: W, <8>: Y))
//     ==Ideal=>  MergeMem(<Bot>: Phi<Bot-8>(U, W), Phi<8>(U, Y))
//
// This strange "subtraction" effect is necessary to ensure IGVN convergence.
// (It is currently unimplemented.)  As you can see, the resulting merge is
// actually a disjoint union of memory states, rather than an overlay.
//
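// An illustrative sketch of the common iteration idiom (here 'mm' and 'gvn'
// are placeholder locals, not names used below).  MergeMemStream skips the
// top sentinel, so the loop body sees only slices carrying a real state:
//
//   for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
//     int   idx   = mms.alias_idx();          // numerical alias index
//     Node* slice = mms.memory();             // memory state for that slice
//     mms.set_memory(gvn->transform(slice));  // optionally store back
//   }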

//------------------------------MergeMemNode-----------------------------------
Node* MergeMemNode::make_empty_memory() {
  Node* empty_memory = (Node*) Compile::current()->top();
  assert(empty_memory->is_top(), "correct sentinel identity");
  return empty_memory;
}

MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) {
  init_class_id(Class_MergeMem);
  // all inputs are nullified in Node::Node(int)
  // set_input(0, NULL);  // no control input

  // Initialize the edges uniformly to top, for starters.
  Node* empty_mem = make_empty_memory();
  for (uint i = Compile::AliasIdxTop; i < req(); i++) {
    init_req(i,empty_mem);
  }
  assert(empty_memory() == empty_mem, "");

  if( new_base != NULL && new_base->is_MergeMem() ) {
    MergeMemNode* mdef = new_base->as_MergeMem();
    assert(mdef->empty_memory() == empty_mem, "consistent sentinels");
    for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) {
      mms.set_memory(mms.memory2());
    }
    assert(base_memory() == mdef->base_memory(), "");
  } else {
    set_base_memory(new_base);
  }
}

// Make a new, untransformed MergeMem with the same base as 'mem'.
// If mem is itself a MergeMem, populate the result with the same edges.
MergeMemNode* MergeMemNode::make(Node* mem) {
  return new MergeMemNode(mem);
}
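
// Typical client-side use, as a sketch ('mem', 'alias_idx', 'new_state', and
// 'gvn' are placeholders): wrap a wide state, override one slice, and let
// GVN fold the result.
//
//   MergeMemNode* mm = MergeMemNode::make(mem);
//   mm->set_memory_at(alias_idx, new_state);
//   Node* folded = gvn->transform(mm);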

//------------------------------cmp--------------------------------------------
uint MergeMemNode::hash() const { return NO_HASH; }
uint MergeMemNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------Identity---------------------------------------
Node* MergeMemNode::Identity(PhaseGVN* phase) {
  // Identity if this merge point does not record any interesting memory
  // disambiguations.
  Node* base_mem = base_memory();
  Node* empty_mem = empty_memory();
  if (base_mem != empty_mem) {  // Memory path is not dead?
    for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
      Node* mem = in(i);
      if (mem != empty_mem && mem != base_mem) {
        return this;            // Many memory splits; no change
      }
    }
  }
  return base_mem;              // No memory splits; ID on the one true input
}
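
// Illustration of the rule above: MergeMem(<Bot>: W, <6>: W, <7>: top) records
// no real disambiguation, so Identity collapses it to W, while
// MergeMem(<Bot>: W, <6>: X) with X != W must survive as a merge.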

//------------------------------Ideal------------------------------------------
// This method is invoked recursively on chains of MergeMem nodes
Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Remove chain'd MergeMems
  //
  // This is delicate, because each "in(i)" (i >= Raw) is interpreted
  // relative to the "in(Bot)".  Since we are patching both at the same time,
  // we have to be careful to read each "in(i)" relative to the old "in(Bot)",
  // but rewrite each "in(i)" relative to the new "in(Bot)".
  Node *progress = NULL;
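  // Worked example (illustrative): suppose this merge is
  //   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y), <8>: Z)
  // Then old_base is the inner MergeMem and new_base becomes W.  Slice <7>
  // is top here, but relative to old_base it reads as Y; the loop below must
  // store Y down explicitly, or it would silently re-read as W once the base
  // is switched to new_base.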

  Node* old_base = base_memory();
  Node* empty_mem = empty_memory();
  if (old_base == empty_mem)
    return NULL; // Dead memory path.

  MergeMemNode* old_mbase;
  if (old_base != NULL && old_base->is_MergeMem())
    old_mbase = old_base->as_MergeMem();
  else
    old_mbase = NULL;
  Node* new_base = old_base;

  // simplify stacked MergeMems in base memory
  if (old_mbase)  new_base = old_mbase->base_memory();

  // the base memory might contribute new slices beyond my req()
  if (old_mbase)  grow_to_match(old_mbase);

  // Look carefully at the base node if it is a phi.
  PhiNode* phi_base;
  if (new_base != NULL && new_base->is_Phi())
    phi_base = new_base->as_Phi();
  else
    phi_base = NULL;

  Node* phi_reg = NULL;
  uint  phi_len = (uint)-1;
  if (phi_base != NULL && !phi_base->is_copy()) {
    // do not examine phi if degraded to a copy
    phi_reg = phi_base->region();
    phi_len = phi_base->req();
    // see if the phi is unfinished
    for (uint i = 1; i < phi_len; i++) {
      if (phi_base->in(i) == NULL) {
        // incomplete phi; do not look at it yet!
        phi_reg = NULL;
        phi_len = (uint)-1;
        break;
      }
    }
  }

  // Note:  We do not call verify_sparse on entry, because inputs
  // can normalize to the base_memory via subsume_node or similar
  // mechanisms.  This method repairs that damage.

  assert(!old_mbase || old_mbase->is_empty_memory(empty_mem), "consistent sentinels");

  // Look at each slice.
  for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
    Node* old_in = in(i);
    // calculate the old memory value
    Node* old_mem = old_in;
    if (old_mem == empty_mem)  old_mem = old_base;
    assert(old_mem == memory_at(i), "");

    // maybe update (reslice) the old memory value

    // simplify stacked MergeMems
    Node* new_mem = old_mem;
    MergeMemNode* old_mmem;
    if (old_mem != NULL && old_mem->is_MergeMem())
      old_mmem = old_mem->as_MergeMem();
    else
      old_mmem = NULL;
    if (old_mmem == this) {
      // This can happen if loops break up and safepoints disappear.
      // A merge of BotPtr (default) with a RawPtr memory derived from a
      // safepoint can be rewritten to a merge of the same BotPtr with
      // the BotPtr phi coming into the loop.  If that phi disappears
      // also, we can end up with a self-loop of the mergemem.
      // In general, if loops degenerate and memory effects disappear,
      // a mergemem can be left looking at itself.  This simply means
      // that the mergemem's default should be used, since there is
      // no longer any apparent effect on this slice.
      // Note: If a memory slice is a MergeMem cycle, it is unreachable
      //       from start.  Update the input to TOP.
      new_mem = (new_base == this || new_base == empty_mem)? empty_mem : new_base;
    }
    else if (old_mmem != NULL) {
      new_mem = old_mmem->memory_at(i);
    }
    // else preceding memory was not a MergeMem

    // replace equivalent phis (unfortunately, they do not GVN together)
    if (new_mem != NULL && new_mem != new_base &&
        new_mem->req() == phi_len && new_mem->in(0) == phi_reg) {
      if (new_mem->is_Phi()) {
        PhiNode* phi_mem = new_mem->as_Phi();
        for (uint i = 1; i < phi_len; i++) {
          if (phi_base->in(i) != phi_mem->in(i)) {
            phi_mem = NULL;
            break;
          }
        }
        if (phi_mem != NULL) {
          // equivalent phi nodes; revert to the def
          new_mem = new_base;
        }
      }
    }

    // maybe store down a new value
    Node* new_in = new_mem;
    if (new_in == new_base)  new_in = empty_mem;

    if (new_in != old_in) {
      // Warning:  Do not combine this "if" with the previous "if"
      // A memory slice might need to be rewritten even if it is semantically
      // unchanged, if the base_memory value has changed.
      set_req(i, new_in);
      progress = this;          // Report progress
    }
  }

  if (new_base != old_base) {
    set_req(Compile::AliasIdxBot, new_base);
    // Don't use set_base_memory(new_base), because we need to update du.
    assert(base_memory() == new_base, "");
    progress = this;
  }

  if( base_memory() == this ) {
    // a self cycle indicates this memory path is dead
    set_req(Compile::AliasIdxBot, empty_mem);
  }

  // Resolve external cycles by calling Ideal on a MergeMem base_memory
  // Recursion must occur after the self cycle check above
  if( base_memory()->is_MergeMem() ) {
    MergeMemNode *new_mbase = base_memory()->as_MergeMem();
    Node *m = phase->transform(new_mbase);  // Rollup any cycles
    if( m != NULL &&
        (m->is_top() ||
         (m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem)) ) {
      // propagate rollup of dead cycle to self
      set_req(Compile::AliasIdxBot, empty_mem);
    }
  }

  if( base_memory() == empty_mem ) {
    progress = this;
    // Cut inputs during Parse phase only.
    // During Optimize phase a dead MergeMem node will be subsumed by Top.
    if( !can_reshape ) {
      for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
        if( in(i) != empty_mem ) { set_req(i, empty_mem); }
      }
    }
  }

  if( !progress && base_memory()->is_Phi() && can_reshape ) {
    // Check if PhiNode::Ideal's "Split phis through memory merges"
    // transform should be attempted.  Look for this->phi->this cycle.
    uint merge_width = req();
    if (merge_width > Compile::AliasIdxRaw) {
      PhiNode* phi = base_memory()->as_Phi();
      for( uint i = 1; i < phi->req(); ++i ) {  // For all paths in
        if (phi->in(i) == this) {
          phase->is_IterGVN()->_worklist.push(phi);
          break;
        }
      }
    }
  }

  assert(progress || verify_sparse(), "please, no dups of base");
  return progress;
}

//-------------------------set_base_memory-------------------------------------
void MergeMemNode::set_base_memory(Node *new_base) {
  Node* empty_mem = empty_memory();
  set_req(Compile::AliasIdxBot, new_base);
  assert(memory_at(req()) == new_base, "must set default memory");
  // Clear out other occurrences of new_base:
  if (new_base != empty_mem) {
    for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
      if (in(i) == new_base)  set_req(i, empty_mem);
    }
  }
}
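
// Note the sparsity invariant maintained above: after set_base_memory, no
// narrow slice aliases the base through an explicit duplicate edge; such
// slices revert to the top sentinel and read through the base implicitly.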

//------------------------------out_RegMask------------------------------------
const RegMask &MergeMemNode::out_RegMask() const {
  return RegMask::Empty;
}

//------------------------------dump_spec--------------------------------------
#ifndef PRODUCT
void MergeMemNode::dump_spec(outputStream *st) const {
  st->print(" {");
  Node* base_mem = base_memory();
  for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) {
    Node* mem = (in(i) != NULL) ? memory_at(i) : base_mem;
    if (mem == base_mem) { st->print(" -"); continue; }
    st->print( " N%d:", mem->_idx );
    Compile::current()->get_adr_type(i)->dump_on(st);
  }
  st->print(" }");
}
#endif // !PRODUCT
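
// For orientation, a sketch of the resulting output (the node number and
// slice mix are made up): a merge whose only explicit slice is the raw slice
// might dump as
//   { N61:rawptr:BOTTOM - - }
// where each "-" is a slice that simply falls through to the base memory.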


#ifdef ASSERT
static bool might_be_same(Node* a, Node* b) {
  if (a == b)  return true;
  if (!(a->is_Phi() || b->is_Phi()))  return false;
  // phis shift around during optimization
  return true;  // pretty stupid...
}

// verify a narrow slice (either incoming or outgoing)
static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
  if (!VerifyAliases)                return;  // don't bother to verify unless requested
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())               return;  // muzzle asserts when printing
  assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel");
  assert(n != NULL, "");
  // Elide intervening MergeMem's
  while (n->is_MergeMem()) {
    n = n->as_MergeMem()->memory_at(alias_idx);
  }
  Compile* C = Compile::current();
  const TypePtr* n_adr_type = n->adr_type();
  if (n == m->empty_memory()) {
    // Implicit copy of base_memory()
  } else if (n_adr_type != TypePtr::BOTTOM) {
    assert(n_adr_type != NULL, "new memory must have a well-defined adr_type");
    assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice");
  } else {
    // A few places like make_runtime_call "know" that VM calls are narrow,
    // and can be used to update only the VM bits stored as TypeRawPtr::BOTTOM.
    bool expected_wide_mem = false;
    if (n == m->base_memory()) {
      expected_wide_mem = true;
    } else if (alias_idx == Compile::AliasIdxRaw ||
               n == m->memory_at(Compile::AliasIdxRaw)) {
      expected_wide_mem = true;
    } else if (!C->alias_type(alias_idx)->is_rewritable()) {
      // memory can "leak through" calls on channels that
      // are write-once.  Allow this also.
      expected_wide_mem = true;
    }
    assert(expected_wide_mem, "expected narrow slice replacement");
  }
}
#else // !ASSERT
#define verify_memory_slice(m,i,n) (void)(0)  // PRODUCT version is no-op
#endif


//-----------------------------memory_at---------------------------------------
Node* MergeMemNode::memory_at(uint alias_idx) const {
  assert(alias_idx >= Compile::AliasIdxRaw ||
         (alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
         "must avoid base_memory and AliasIdxTop");

  // Otherwise, it is a narrow slice.
  Node* n = alias_idx < req() ? in(alias_idx) : empty_memory();
  Compile *C = Compile::current();
  if (is_empty_memory(n)) {
    // the array is sparse; empty slots are the "top" node
    n = base_memory();
    assert(Node::in_dump()
           || n == NULL || n->bottom_type() == Type::TOP
           || n->adr_type() == NULL // address is TOP
           || n->adr_type() == TypePtr::BOTTOM
           || n->adr_type() == TypeRawPtr::BOTTOM
           || Compile::current()->AliasLevel() == 0,
           "must be a wide memory");
    // AliasLevel == 0 if we are organizing the memory states manually.
    // See verify_memory_slice for comments on TypeRawPtr::BOTTOM.
  } else {
    // make sure the stored slice is sane
    #ifdef ASSERT
    if (VMError::is_error_reported() || Node::in_dump()) {
    } else if (might_be_same(n, base_memory())) {
      // Give it a pass:  It is a mostly harmless repetition of the base.
      // This can arise normally from node subsumption during optimization.
    } else {
      verify_memory_slice(this, alias_idx, n);
    }
    #endif
  }
  return n;
}
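
// Reading a slice from client code, as a sketch ('C', 'mm', and 'adr_type'
// are placeholders for a Compile*, a MergeMemNode*, and a const TypePtr*):
//
//   int   idx   = C->get_alias_index(adr_type);
//   Node* state = mm->memory_at(idx);  // the slice, or base_memory() when
//                                      // the slot holds the top sentinel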

//---------------------------set_memory_at-------------------------------------
void MergeMemNode::set_memory_at(uint alias_idx, Node *n) {
  verify_memory_slice(this, alias_idx, n);
  Node* empty_mem = empty_memory();
  if (n == base_memory())  n = empty_mem;  // collapse default
  uint need_req = alias_idx+1;
  if (req() < need_req) {
    if (n == empty_mem)  return;  // already the default, so do not grow me
    // grow the sparse array
    do {
      add_req(empty_mem);
    } while (req() < need_req);
  }
  set_req( alias_idx, n );
}
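
// Growth contract by example: on a freshly made merge, req() is
// 1+Compile::AliasIdxRaw == 4.  A call set_memory_at(9, st) pads in(4)..in(8)
// with the top sentinel and installs st at in(9); memory_at(5) still answers
// base_memory(), because slot 5 stayed empty.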



//--------------------------iteration_setup------------------------------------
void MergeMemNode::iteration_setup(const MergeMemNode* other) {
  if (other != NULL) {
    grow_to_match(other);
    // invariant:  the finite support of mm2 is within mm->req()
    #ifdef ASSERT
    for (uint i = req(); i < other->req(); i++) {
      assert(other->is_empty_memory(other->in(i)), "slice left uncovered");
    }
    #endif
  }
  // Replace spurious copies of base_memory by top.
  Node* base_mem = base_memory();
  if (base_mem != NULL && !base_mem->is_top()) {
    for (uint i = Compile::AliasIdxBot+1, imax = req(); i < imax; i++) {
      if (in(i) == base_mem)
        set_req(i, empty_memory());
    }
  }
}

//---------------------------grow_to_match-------------------------------------
void MergeMemNode::grow_to_match(const MergeMemNode* other) {
  Node* empty_mem = empty_memory();
  assert(other->is_empty_memory(empty_mem), "consistent sentinels");
  // look for the finite support of the other memory
  for (uint i = other->req(); --i >= req(); ) {
    if (other->in(i) != empty_mem) {
      uint new_len = i+1;
      while (req() < new_len)  add_req(empty_mem);
      break;
    }
  }
}
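
// Shape example: if this merge has req() == 6 and 'other' has its last
// non-top slice at in(9), grow_to_match appends top at in(6)..in(9), so a
// paired MergeMemStream walk can visit both merges index-for-index.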

//---------------------------verify_sparse-------------------------------------
#ifndef PRODUCT
bool MergeMemNode::verify_sparse() const {
  assert(is_empty_memory(make_empty_memory()), "sane sentinel");
  Node* base_mem = base_memory();
  // The following can happen in degenerate cases, since empty==top.
  if (is_empty_memory(base_mem))  return true;
  for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
    assert(in(i) != NULL, "sane slice");
    if (in(i) == base_mem)  return false;  // should have been the sentinel value!
  }
  return true;
}

bool MergeMemStream::match_memory(Node* mem, const MergeMemNode* mm, int idx) {
  Node* n;
  n = mm->in(idx);
  if (mem == n)  return true;  // might be empty_memory()
  n = (idx == Compile::AliasIdxBot)? mm->base_memory(): mm->memory_at(idx);
  if (mem == n)  return true;
  while (n->is_Phi() && (n = n->as_Phi()->is_copy()) != NULL) {
    if (mem == n)  return true;
    if (n == NULL)  break;
  }
  return false;
}
#endif // !PRODUCT