/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"

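// Static state shared by all compilations: the C frame pointer register name
// and the register-mask tables consulted by the matcher and the register
// allocator.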
OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
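// The bare table names in the initializer list below (reduceOp, leftOp,
// rightOp, swallowed, must_clone, the register-save policy arrays and
// ruleName) are assumed to be the per-platform tables emitted by adlc from
// the architecture's AD file and made visible through opto/ad.hpp.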
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _shared_nodes(C->comp_arena()),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type),
  _ruleName(ruleName),
  _allocation_started(false),
  _states_arena(Chunk::medium_size),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;
  idealreg2spillmask  [Op_VecS] = NULL;
  idealreg2spillmask  [Op_VecD] = NULL;
  idealreg2spillmask  [Op_VecX] = NULL;
  idealreg2spillmask  [Op_VecY] = NULL;
  idealreg2spillmask  [Op_VecZ] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;
  idealreg2debugmask  [Op_VecS] = NULL;
  idealreg2debugmask  [Op_VecD] = NULL;
  idealreg2debugmask  [Op_VecX] = NULL;
  idealreg2debugmask  [Op_VecY] = NULL;
  idealreg2debugmask  [Op_VecZ] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;
  idealreg2mhdebugmask[Op_VecS] = NULL;
  idealreg2mhdebugmask[Op_VecD] = NULL;
  idealreg2mhdebugmask[Op_VecX] = NULL;
  idealreg2mhdebugmask[Op_VecY] = NULL;
  idealreg2mhdebugmask[Op_VecZ] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

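// Note on the helper below: incoming stack-slot arguments are "warped" from
// VMReg stack-slot numbering into OptoReg numbering biased by _old_SP and the
// out-preserve area, and _in_arg_limit is bumped to track the highest slot seen.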
//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {  // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
}



#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited <<= n->_idx;
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif


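// Overview of the phase below: match() lays out the incoming-argument area,
// marks shared nodes with find_shared(), moves the ideal graph into old-space,
// and then rebuilds it in new-space as machine nodes via xform()/match_tree(),
// bailing out through record_method_not_compilable() on failure.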
//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    int ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halve" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocators point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING);

  // Create new ideal node ConP #NULL even if it does exist in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.Clear();             // Clear visit bits for xform call
  C->set_cached_top_node(xform( C->top(), live_nodes ));
  if (!C->failing()) {
    Node* xroot = xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers
  Fixup_Save_On_Entry( );
}


//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}
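// Note: entries past the fixed TypeFunc slots initialized above are filled in
// by the caller (Fixup_Save_On_Entry, below), one per save-on-entry register.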

//---------------------------init_first_stack_mask-----------------------------
// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+5));

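  // Sizing note: 3*6+5 == 23 masks -- three tables (spill, debug, mhdebug) of
  // six scalar register classes each, plus five vector spill masks, matching
  // the rms[0]..rms[22] slots assigned below.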
  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecS] = &rms[18];
  idealreg2spillmask  [Op_VecD] = &rms[19];
  idealreg2spillmask  [Op_VecX] = &rms[20];
  idealreg2spillmask  [Op_VecY] = &rms[21];
  idealreg2spillmask  [Op_VecZ] = &rms[22];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there
    // otherwise vector spills could stomp over stack slots in caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
     idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
  }
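  // By this point aligned_stack_mask has been progressively restricted: the
  // VecX/VecY/VecZ blocks above each trim trailing incoming-arg slots and
  // re-clear the mask to the larger set size before OR-ing it into that
  // vector's spill mask.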
  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On sparc for instance we may have to use 64 bit moves which will
    // kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save registers.
  // Caller-save registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP]= *idealreg2spillmask[Op_RegP];

  *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];

  // Prevent stub compilations from attempting to reference
  // callee-saved registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();

  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // registers the caller has to save do not work
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [Op_RegN]->Remove(i);
      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
      idealreg2debugmask  [Op_RegD]->Remove(i);
      idealreg2debugmask  [Op_RegP]->Remove(i);

      idealreg2mhdebugmask[Op_RegN]->Remove(i);
      idealreg2mhdebugmask[Op_RegI]->Remove(i);
      idealreg2mhdebugmask[Op_RegL]->Remove(i);
      idealreg2mhdebugmask[Op_RegF]->Remove(i);
      idealreg2mhdebugmask[Op_RegD]->Remove(i);
      idealreg2mhdebugmask[Op_RegP]->Remove(i);
    }
  }

  // Subtract the register we use to save the SP for MethodHandle
  // invokes from the debug mask.
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    // Also save argument registers in the trampolining stubs
    (C->save_argument_registers() && is_spillable_arg(reg));
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Save argument registers in the trampolining stubs
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver(false);
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
  }

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
|
746 |
_register_save_type[i+1] == Op_RegF && |
|
747 |
is_save_on_entry(i+1) ) { |
|
748 |
// Add other bit for double |
|
749 |
ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
750 |
reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
751 |
tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
752 |
tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
753 |
halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
754 |
mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD ); |
1 | 755 |
proj_cnt += 2; // Skip 2 for doubles |
756 |
} |
|
757 |
else if( (i&1) == 1 && // Else check for high half of double |
|
758 |
_register_save_type[i-1] == Op_RegF && |
|
759 |
_register_save_type[i ] == Op_RegF && |
|
760 |
is_save_on_entry(i-1) ) { |
|
761 |
ret_rms [ ret_edge_cnt] = RegMask::Empty; |
|
762 |
reth_rms [ reth_edge_cnt] = RegMask::Empty; |
|
763 |
tail_call_rms[tail_call_edge_cnt] = RegMask::Empty; |
|
764 |
tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty; |
|
765 |
halt_rms [ halt_edge_cnt] = RegMask::Empty; |
|
766 |
mproj = C->top(); |
|
767 |
} |
|
768 |
// Is this a RegI low half of a RegL? Double up 2 adjacent RegI's |
|
769 |
// into a single RegL. |
|
770 |
else if( (i&1) == 0 && |
|
771 |
_register_save_type[i ] == Op_RegI && |
|
772 |
_register_save_type[i+1] == Op_RegI && |
|
773 |
is_save_on_entry(i+1) ) { |
|
774 |
// Add other bit for long |
|
775 |
ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
776 |
reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
777 |
tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
778 |
tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
779 |
halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1)); |
|
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
780 |
mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL ); |
1 | 781 |
proj_cnt += 2; // Skip 2 for longs |
782 |
} |
|
783 |
else if( (i&1) == 1 && // Else check for high half of long |
|
784 |
_register_save_type[i-1] == Op_RegI && |
|
785 |
_register_save_type[i ] == Op_RegI && |
|
786 |
is_save_on_entry(i-1) ) { |
|
787 |
ret_rms [ ret_edge_cnt] = RegMask::Empty; |
|
788 |
reth_rms [ reth_edge_cnt] = RegMask::Empty; |
|
789 |
tail_call_rms[tail_call_edge_cnt] = RegMask::Empty; |
|
790 |
tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty; |
|
791 |
halt_rms [ halt_edge_cnt] = RegMask::Empty; |
|
792 |
mproj = C->top(); |
|
793 |
} else { |
|
794 |
// Make a projection for it off the Start |
|
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
795 |
mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] ); |
1 | 796 |
} |
797 |
||
798 |
ret_edge_cnt ++; |
|
799 |
reth_edge_cnt ++; |
|
800 |
tail_call_edge_cnt ++; |
|
801 |
tail_jump_edge_cnt ++; |
|
802 |
halt_edge_cnt ++; |
|
803 |
||
804 |
// Add a use of the SOE register to all exit paths |
|
805 |
for( uint j=1; j < root->req(); j++ ) |
|
806 |
root->in(j)->add_req(mproj); |
|
807 |
} // End of if a save-on-entry register |
|
808 |
} // End of for all machine registers |
|
809 |
} |
|
810 |
||
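// Resulting graph shape (informal sketch, not generated code): for each live
// save-on-entry register the loop above creates
//
//   Start --> MachProj(reg's RegMask) --+--> Return
//                                       +--> Rethrow / TailCall / TailJump / Halt
//
// i.e. a projection off Start that every exit node consumes, so to the
// register allocator the SOE register looks like an incoming argument that
// must be preserved along every path out of the method.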
811 |
//------------------------------init_spill_mask-------------------------------- |
|
812 |
void Matcher::init_spill_mask( Node *ret ) { |
|
813 |
if( idealreg2regmask[Op_RegI] ) return; // One time only init |
|
814 |
||
815 |
OptoReg::c_frame_pointer = c_frame_pointer(); |
|
816 |
c_frame_ptr_mask = c_frame_pointer(); |
|
817 |
#ifdef _LP64 |
|
818 |
// pointers are twice as big |
|
819 |
c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1)); |
|
820 |
#endif |
|
821 |
||
822 |
// Start at OptoReg::stack0() |
|
823 |
STACK_ONLY_mask.Clear(); |
|
824 |
OptoReg::Name init = OptoReg::stack2reg(0); |
|
825 |
// STACK_ONLY_mask is all stack bits |
|
826 |
OptoReg::Name i; |
|
827 |
for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) |
|
828 |
STACK_ONLY_mask.Insert(i); |
|
829 |
// Also set the "infinite stack" bit. |
|
830 |
STACK_ONLY_mask.set_AllStack(); |
|
831 |
||
832 |
// Copy the register names over into the shared world |
|
833 |
for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) { |
|
834 |
// SharedInfo::regName[i] = regName[i]; |
|
835 |
// Handy RegMasks per machine register |
|
836 |
mreg2regmask[i].Insert(i); |
|
837 |
} |
|
838 |
||
839 |
// Grab the Frame Pointer |
|
840 |
Node *fp = ret->in(TypeFunc::FramePtr); |
|
841 |
Node *mem = ret->in(TypeFunc::Memory); |
|
842 |
const TypePtr* atp = TypePtr::BOTTOM; |
|
843 |
// Share frame pointer while making spill ops |
|
844 |
set_shared(fp); |
|
845 |
||
846 |
// Compute generic short-offset Loads |
|
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
236
diff
changeset
|
847 |
#ifdef _LP64 |
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
848 |
MachNode *spillCP = match_tree(new LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered)); |
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
236
diff
changeset
|
849 |
#endif |
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
850 |
MachNode *spillI = match_tree(new LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered)); |
31035
0f0743952c41
8077504: Unsafe load can loose control dependency and cause crash
roland
parents:
30624
diff
changeset
|
851 |
MachNode *spillL = match_tree(new LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered, LoadNode::DependsOnlyOnTest, false)); |
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
852 |
MachNode *spillF = match_tree(new LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered)); |
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
853 |
MachNode *spillD = match_tree(new LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered)); |
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
854 |
MachNode *spillP = match_tree(new LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered)); |
1 | 855 |
assert(spillI != NULL && spillL != NULL && spillF != NULL && |
856 |
spillD != NULL && spillP != NULL, ""); |
|
857 |
// Get the ADLC notion of the right regmask, for each basic type. |
|
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
236
diff
changeset
|
858 |
#ifdef _LP64 |
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
236
diff
changeset
|
859 |
idealreg2regmask[Op_RegN] = &spillCP->out_RegMask(); |
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
236
diff
changeset
|
860 |
#endif |
1 | 861 |
idealreg2regmask[Op_RegI] = &spillI->out_RegMask(); |
862 |
idealreg2regmask[Op_RegL] = &spillL->out_RegMask(); |
|
863 |
idealreg2regmask[Op_RegF] = &spillF->out_RegMask(); |
|
864 |
idealreg2regmask[Op_RegD] = &spillD->out_RegMask(); |
|
865 |
idealreg2regmask[Op_RegP] = &spillP->out_RegMask(); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
866 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
867 |
// Vector regmasks. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
868 |
if (Matcher::vector_size_supported(T_BYTE,4)) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
869 |
TypeVect::VECTS = TypeVect::make(T_BYTE, 4); |
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
870 |
MachNode *spillVectS = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS)); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
871 |
idealreg2regmask[Op_VecS] = &spillVectS->out_RegMask(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
872 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
873 |
if (Matcher::vector_size_supported(T_FLOAT,2)) { |
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
874 |
MachNode *spillVectD = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD)); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
875 |
idealreg2regmask[Op_VecD] = &spillVectD->out_RegMask(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
876 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
877 |
if (Matcher::vector_size_supported(T_FLOAT,4)) { |
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
878 |
MachNode *spillVectX = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX)); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
879 |
idealreg2regmask[Op_VecX] = &spillVectX->out_RegMask(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
880 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
881 |
if (Matcher::vector_size_supported(T_FLOAT,8)) { |
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
882 |
MachNode *spillVectY = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY)); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
883 |
idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
884 |
} |
30624 | 885 |
if (Matcher::vector_size_supported(T_FLOAT,16)) { |
886 |
MachNode *spillVectZ = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTZ)); |
|
887 |
idealreg2regmask[Op_VecZ] = &spillVectZ->out_RegMask(); |
|
888 |
} |
|
1 | 889 |
} |
890 |
||
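// Sketch of how the masks memoized above are typically consulted later
// (illustrative only; the spill and debug-info machinery elsewhere in the
// matcher are the real consumers):
#if 0
  // Assuming init_spill_mask() has already run for this compilation:
  const RegMask* int_mask = idealreg2regmask[Op_RegI]; // registers that can hold an int
  const RegMask* ptr_mask = idealreg2regmask[Op_RegP]; // registers that can hold a pointer
#endif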
891 |
#ifdef ASSERT |
|
892 |
static void match_alias_type(Compile* C, Node* n, Node* m) { |
|
893 |
if (!VerifyAliases) return; // do not go looking for trouble by default |
|
894 |
const TypePtr* nat = n->adr_type(); |
|
895 |
const TypePtr* mat = m->adr_type(); |
|
896 |
int nidx = C->get_alias_index(nat); |
|
897 |
int midx = C->get_alias_index(mat); |
|
898 |
// Detune the assert for cases like (AndI 0xFF (LoadB p)). |
|
899 |
if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) { |
|
900 |
for (uint i = 1; i < n->req(); i++) { |
|
901 |
Node* n1 = n->in(i); |
|
902 |
const TypePtr* n1at = n1->adr_type(); |
|
903 |
if (n1at != NULL) { |
|
904 |
nat = n1at; |
|
905 |
nidx = C->get_alias_index(n1at); |
|
906 |
} |
|
907 |
} |
|
908 |
} |
|
909 |
// %%% Kludgery. Instead, fix ideal adr_type methods for all these cases: |
|
910 |
if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) { |
|
911 |
switch (n->Opcode()) { |
|
10267 | 912 |
case Op_PrefetchAllocation: |
1 | 913 |
nidx = Compile::AliasIdxRaw; |
914 |
nat = TypeRawPtr::BOTTOM; |
|
915 |
break; |
|
916 |
} |
|
917 |
} |
|
918 |
if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) { |
|
919 |
switch (n->Opcode()) { |
|
920 |
case Op_ClearArray: |
|
921 |
midx = Compile::AliasIdxRaw; |
|
922 |
mat = TypeRawPtr::BOTTOM; |
|
923 |
break; |
|
924 |
} |
|
925 |
} |
|
926 |
if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) { |
|
927 |
switch (n->Opcode()) { |
|
928 |
case Op_Return: |
|
929 |
case Op_Rethrow: |
|
930 |
case Op_Halt: |
|
931 |
case Op_TailCall: |
|
932 |
case Op_TailJump: |
|
933 |
nidx = Compile::AliasIdxBot; |
|
934 |
nat = TypePtr::BOTTOM; |
|
935 |
break; |
|
936 |
} |
|
937 |
} |
|
938 |
if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) { |
|
939 |
switch (n->Opcode()) { |
|
940 |
case Op_StrComp: |
|
2348 | 941 |
case Op_StrEquals: |
942 |
case Op_StrIndexOf: |
|
33628 | 943 |
case Op_StrIndexOfChar: |
595
a2be4c89de81
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
594
diff
changeset
|
944 |
case Op_AryEq: |
33628 | 945 |
case Op_HasNegatives: |
1 | 946 |
case Op_MemBarVolatile: |
947 |
case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type? |
|
33628 | 948 |
case Op_StrInflatedCopy: |
949 |
case Op_StrCompressedCopy: |
|
38017
55047d16f141
8147844: new method j.l.Runtime.onSpinWait() and the corresponding x86 hotspot instrinsic
ikrylov
parents:
36336
diff
changeset
|
950 |
case Op_OnSpinWait: |
15242
695bb216be99
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
14623
diff
changeset
|
951 |
case Op_EncodeISOArray: |
1 | 952 |
nidx = Compile::AliasIdxTop; |
953 |
nat = NULL; |
|
954 |
break; |
|
955 |
} |
|
956 |
} |
|
957 |
if (nidx != midx) { |
|
958 |
if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) { |
|
959 |
tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx); |
|
960 |
n->dump(); |
|
961 |
m->dump(); |
|
962 |
} |
|
963 |
assert(C->subsume_loads() && C->must_alias(nat, midx), |
|
964 |
"must not lose alias info when matching"); |
|
965 |
} |
|
966 |
} |
|
967 |
#endif |
|
968 |
||
969 |
//------------------------------xform------------------------------------------ |
|
970 |
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine |
|
971 |
// Node in new-space. Given a new-space Node, recursively walk his children. |
|
972 |
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; } |
|
973 |
Node *Matcher::xform( Node *n, int max_stack ) { |
|
974 |
// Use one stack to keep both: child's node/state and parent's node/index |
|
33158
f4e6c593ba73
8137160: Use Compile::live_nodes instead of Compile::unique() in appropriate places -- followup
zmajo
parents:
33082
diff
changeset
|
975 |
MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2 |
1 | 976 |
mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root |
977 |
||
978 |
while (mstack.is_nonempty()) { |
|
18099
45973b036c3e
8014959: assert(Compile::current()->live_nodes() < (uint)MaxNodeLimit) failed: Live Node limit exceeded limit
drchase
parents:
17875
diff
changeset
|
979 |
C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions"); |
45973b036c3e
8014959: assert(Compile::current()->live_nodes() < (uint)MaxNodeLimit) failed: Live Node limit exceeded limit
drchase
parents:
17875
diff
changeset
|
980 |
if (C->failing()) return NULL; |
1 | 981 |
n = mstack.node(); // Leave node on stack |
982 |
Node_State nstate = mstack.state(); |
|
983 |
if (nstate == Visit) { |
|
984 |
mstack.set_state(Post_Visit); |
|
985 |
Node *oldn = n; |
|
986 |
// Old-space or new-space check |
|
987 |
if (!C->node_arena()->contains(n)) { |
|
988 |
// Old space! |
|
989 |
Node* m; |
|
990 |
if (has_new_node(n)) { // Not yet Label/Reduced |
|
991 |
m = new_node(n); |
|
992 |
} else { |
|
993 |
if (!is_dontcare(n)) { // Matcher can match this guy |
|
994 |
// Calls match special. They match alone with no children. |
|
995 |
// Their children, the incoming arguments, match normally. |
|
996 |
m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n); |
|
997 |
if (C->failing()) return NULL; |
|
998 |
if (m == NULL) { Matcher::soft_match_failure(); return NULL; } |
|
999 |
} else { // Nothing the matcher cares about |
|
1000 |
if( n->is_Proj() && n->in(0)->is_Multi()) { // Projections? |
|
1001 |
// Convert to machine-dependent projection |
|
1002 |
m = n->in(0)->as_Multi()->match( n->as_Proj(), this ); |
|
768 | 1003 |
#ifdef ASSERT |
1004 |
_new2old_map.map(m->_idx, n); |
|
1005 |
#endif |
|
1 | 1006 |
if (m->in(0) != NULL) // m might be top |
1400
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
1007 |
collect_null_checks(m, n); |
1 | 1008 |
} else { // Else just a regular 'ol guy |
1009 |
m = n->clone(); // So just clone into new-space |
|
768 | 1010 |
#ifdef ASSERT |
1011 |
_new2old_map.map(m->_idx, n); |
|
1012 |
#endif |
|
1 | 1013 |
// Def-Use edges will be added incrementally as Uses |
1014 |
// of this node are matched. |
|
1015 |
assert(m->outcnt() == 0, "no Uses of this clone yet"); |
|
1016 |
} |
|
1017 |
} |
|
1018 |
||
1019 |
set_new_node(n, m); // Map old to new |
|
1020 |
if (_old_node_note_array != NULL) { |
|
1021 |
Node_Notes* nn = C->locate_node_notes(_old_node_note_array, |
|
1022 |
n->_idx); |
|
1023 |
C->set_node_notes_at(m->_idx, nn); |
|
1024 |
} |
|
1025 |
debug_only(match_alias_type(C, n, m)); |
|
1026 |
} |
|
1027 |
n = m; // n is now a new-space node |
|
1028 |
mstack.set_node(n); |
|
1029 |
} |
|
1030 |
||
1031 |
// New space! |
|
1032 |
if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty()) |
|
1033 |
||
1034 |
int i; |
|
1035 |
// Put precedence edges on stack first (match them last). |
|
1036 |
for (i = oldn->req(); (uint)i < oldn->len(); i++) { |
|
1037 |
Node *m = oldn->in(i); |
|
1038 |
if (m == NULL) break; |
|
1039 |
// set -1 to call add_prec() instead of set_req() during Step1 |
|
1040 |
mstack.push(m, Visit, n, -1); |
|
1041 |
} |
|
1042 |
||
30300
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1043 |
// Handle precedence edges for interior nodes |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1044 |
for (i = n->len()-1; (uint)i >= n->req(); i--) { |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1045 |
Node *m = n->in(i); |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1046 |
if (m == NULL || C->node_arena()->contains(m)) continue; |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1047 |
n->rm_prec(i); |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1048 |
// set -1 to call add_prec() instead of set_req() during Step1 |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1049 |
mstack.push(m, Visit, n, -1); |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1050 |
} |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1051 |
|
1 | 1052 |
// For constant debug info, I'd rather have unmatched constants. |
1053 |
int cnt = n->req(); |
|
1054 |
JVMState* jvms = n->jvms(); |
|
1055 |
int debug_cnt = jvms ? jvms->debug_start() : cnt; |
|
1056 |
||
1057 |
// Now do only debug info. Clone constants rather than matching. |
|
1058 |
// Constants are represented directly in the debug info without |
|
1059 |
// the need for executable machine instructions. |
|
1060 |
// Monitor boxes are also represented directly. |
|
1061 |
for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do |
|
1062 |
Node *m = n->in(i); // Get input |
|
1063 |
int op = m->Opcode(); |
|
1064 |
assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites"); |
|
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1065 |
if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass || |
1 | 1066 |
op == Op_ConF || op == Op_ConD || op == Op_ConL |
1067 |
// || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp |
|
1068 |
) { |
|
1069 |
m = m->clone(); |
|
768 | 1070 |
#ifdef ASSERT |
1071 |
_new2old_map.map(m->_idx, n); |
|
1072 |
#endif |
|
2131 | 1073 |
mstack.push(m, Post_Visit, n, i); // Don't need to visit |
1 | 1074 |
mstack.push(m->in(0), Visit, m, 0); |
1075 |
} else { |
|
1076 |
mstack.push(m, Visit, n, i); |
|
1077 |
} |
|
1078 |
} |
|
1079 |
||
1080 |
// And now walk his children, and convert his inputs to new-space. |
|
1081 |
for( ; i >= 0; --i ) { // For all normal inputs do |
|
1082 |
Node *m = n->in(i); // Get input |
|
1083 |
if(m != NULL) |
|
1084 |
mstack.push(m, Visit, n, i); |
|
1085 |
} |
|
1086 |
||
1087 |
} |
|
1088 |
else if (nstate == Post_Visit) { |
|
1089 |
// Set xformed input |
|
1090 |
Node *p = mstack.parent(); |
|
1091 |
if (p != NULL) { // root doesn't have parent |
|
1092 |
int i = (int)mstack.index(); |
|
1093 |
if (i >= 0) |
|
1094 |
p->set_req(i, n); // required input |
|
1095 |
else if (i == -1) |
|
1096 |
p->add_prec(n); // precedence input |
|
1097 |
else |
|
1098 |
ShouldNotReachHere(); |
|
1099 |
} |
|
1100 |
mstack.pop(); // remove processed node from stack |
|
1101 |
} |
|
1102 |
else { |
|
1103 |
ShouldNotReachHere(); |
|
1104 |
} |
|
1105 |
} // while (mstack.is_nonempty()) |
|
1106 |
return n; // Return new-space Node |
|
1107 |
} |
|
1108 |
||
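// A minimal, self-contained sketch (illustration only, not HotSpot code) of
// the Visit/Post_Visit worklist idiom used by Matcher::xform() above: an
// explicit stack replaces recursion so very deep ideal graphs cannot blow the
// native stack. The names below (DemoNode, demo_walk) are invented for the
// example.
#if 0
#include <cstddef>
#include <utility>
#include <vector>

struct DemoNode {
  std::vector<DemoNode*> in;   // inputs, analogous to Node::in(i)
  bool visited;
  DemoNode() : visited(false) {}
};

enum DemoState { Demo_Visit, Demo_Post_Visit };

static void demo_walk(DemoNode* root) {
  std::vector<std::pair<DemoNode*, DemoState> > stack;
  stack.push_back(std::make_pair(root, Demo_Visit));
  while (!stack.empty()) {
    DemoNode* n = stack.back().first;
    if (stack.back().second == Demo_Visit) {
      stack.back().second = Demo_Post_Visit;  // revisit this entry after the children
      if (!n->visited) {
        n->visited = true;
        for (std::size_t i = 0; i < n->in.size(); i++) {
          if (n->in[i] != NULL) {
            stack.push_back(std::make_pair(n->in[i], Demo_Visit));
          }
        }
      }
    } else {
      // Post-visit: this is the point at which xform wires the transformed
      // child back into its parent (set_req for normal inputs, add_prec for
      // precedence edges) before popping the entry.
      stack.pop_back();
    }
  }
}
#endif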
1109 |
//------------------------------warp_outgoing_stk_arg------------------------ |
|
1110 |
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) { |
|
1111 |
// Convert outgoing argument location to a pre-biased stack offset |
|
1112 |
if (reg->is_stack()) { |
|
1113 |
OptoReg::Name warped = reg->reg2stack(); |
|
1114 |
// Adjust the stack slot offset to be the register number used |
|
1115 |
// by the allocator. |
|
1116 |
warped = OptoReg::add(begin_out_arg_area, warped); |
|
1117 |
// Keep track of the largest numbered stack slot used for an arg. |
|
1118 |
// Largest used slot per call-site indicates the amount of stack |
|
1119 |
// that is killed by the call. |
|
1120 |
if( warped >= out_arg_limit_per_call ) |
|
1121 |
out_arg_limit_per_call = OptoReg::add(warped,1); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
1122 |
if (!RegMask::can_represent_arg(warped)) { |
39431
cb1b2538c4b2
8159720: Failure of C2 compilation with tiered prevents some C1 compilations.
cvarming
parents:
39419
diff
changeset
|
1123 |
C->record_method_not_compilable("unsupported calling sequence"); |
1 | 1124 |
return OptoReg::Bad; |
1125 |
} |
|
1126 |
return warped; |
|
1127 |
} |
|
1128 |
return OptoReg::as_OptoReg(reg); |
|
1129 |
} |
|
1130 |
||
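// Worked example with illustrative numbers only (the concrete values depend
// on the platform and the frame being compiled): if begin_out_arg_area is
// OptoReg 22 and the calling convention places an argument at stack slot 3
// (reg->reg2stack() == 3), the argument is "warped" to OptoReg 22 + 3 = 25
// and out_arg_limit_per_call is raised to at least 26, recording that slots
// up to 25 are killed across this call site.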
1131 |
||
1132 |
//------------------------------match_sfpt------------------------------------- |
|
1133 |
// Helper function to match call instructions. Calls match special. |
|
1134 |
// They match alone with no children. Their children, the incoming |
|
1135 |
// arguments, match normally. |
|
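// Informal sketch of the result (hypothetical two-argument call): after this
// routine runs, mcall->_in_rms[TypeFunc::Parms+0] holds the RegMask of the
// first outgoing argument's location and _in_rms[TypeFunc::Parms+1] the
// second's, as dictated by calling_convention(); a fat MachProj created
// further down kills the whole outgoing-argument area at the call site.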
1136 |
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) { |
|
1137 |
MachSafePointNode *msfpt = NULL; |
|
1138 |
MachCallNode *mcall = NULL; |
|
1139 |
uint cnt; |
|
1140 |
// Split out case for SafePoint vs Call |
|
1141 |
CallNode *call; |
|
1142 |
const TypeTuple *domain; |
|
1143 |
ciMethod* method = NULL; |
|
4566
b363f6ef4068
6829187: compiler optimizations required for JSR 292
twisti
parents:
4431
diff
changeset
|
1144 |
bool is_method_handle_invoke = false; // for special kill effects |
1 | 1145 |
if( sfpt->is_Call() ) { |
1146 |
call = sfpt->as_Call(); |
|
1147 |
domain = call->tf()->domain(); |
|
1148 |
cnt = domain->cnt(); |
|
1149 |
||
1150 |
// Match just the call, nothing else |
|
1151 |
MachNode *m = match_tree(call); |
|
1152 |
if (C->failing()) return NULL; |
|
1153 |
if( m == NULL ) { Matcher::soft_match_failure(); return NULL; } |
|
1154 |
||
1155 |
// Copy data from the Ideal SafePoint to the machine version |
|
1156 |
mcall = m->as_MachCall(); |
|
1157 |
||
1158 |
mcall->set_tf( call->tf()); |
|
1159 |
mcall->set_entry_point(call->entry_point()); |
|
1160 |
mcall->set_cnt( call->cnt()); |
|
1161 |
||
1162 |
if( mcall->is_MachCallJava() ) { |
|
1163 |
MachCallJavaNode *mcall_java = mcall->as_MachCallJava(); |
|
1164 |
const CallJavaNode *call_java = call->as_CallJava(); |
|
1165 |
method = call_java->method(); |
|
1166 |
mcall_java->_method = method; |
|
1167 |
mcall_java->_bci = call_java->_bci; |
|
1168 |
mcall_java->_optimized_virtual = call_java->is_optimized_virtual(); |
|
4566
b363f6ef4068
6829187: compiler optimizations required for JSR 292
twisti
parents:
4431
diff
changeset
|
1169 |
is_method_handle_invoke = call_java->is_method_handle_invoke(); |
b363f6ef4068
6829187: compiler optimizations required for JSR 292
twisti
parents:
4431
diff
changeset
|
1170 |
mcall_java->_method_handle_invoke = is_method_handle_invoke; |
35086
bbf32241d851
8072008: Emit direct call instead of linkTo* for recursive indy/MH.invoke* calls
vlivanov
parents:
34174
diff
changeset
|
1171 |
mcall_java->_override_symbolic_info = call_java->override_symbolic_info(); |
10514
e229a19078cf
7071307: MethodHandle bimorphic inlining should consider the frequency
never
parents:
10267
diff
changeset
|
1172 |
if (is_method_handle_invoke) { |
e229a19078cf
7071307: MethodHandle bimorphic inlining should consider the frequency
never
parents:
10267
diff
changeset
|
1173 |
C->set_has_method_handle_invokes(true); |
e229a19078cf
7071307: MethodHandle bimorphic inlining should consider the frequency
never
parents:
10267
diff
changeset
|
1174 |
} |
1 | 1175 |
if( mcall_java->is_MachCallStaticJava() ) |
1176 |
mcall_java->as_MachCallStaticJava()->_name = |
|
1177 |
call_java->as_CallStaticJava()->_name; |
|
1178 |
if( mcall_java->is_MachCallDynamicJava() ) |
|
1179 |
mcall_java->as_MachCallDynamicJava()->_vtable_index = |
|
1180 |
call_java->as_CallDynamicJava()->_vtable_index; |
|
1181 |
} |
|
1182 |
else if( mcall->is_MachCallRuntime() ) { |
|
1183 |
mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name; |
|
1184 |
} |
|
1185 |
msfpt = mcall; |
|
1186 |
} |
|
1187 |
// This is a non-call safepoint |
|
1188 |
else { |
|
1189 |
call = NULL; |
|
1190 |
domain = NULL; |
|
1191 |
MachNode *mn = match_tree(sfpt); |
|
1192 |
if (C->failing()) return NULL; |
|
1193 |
msfpt = mn->as_MachSafePoint(); |
|
1194 |
cnt = TypeFunc::Parms; |
|
1195 |
} |
|
1196 |
||
1197 |
// Advertise the correct memory effects (for anti-dependence computation). |
|
1198 |
msfpt->set_adr_type(sfpt->adr_type()); |
|
1199 |
||
1200 |
// Allocate a private array of RegMasks. These RegMasks are not shared. |
|
1201 |
msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt ); |
|
1202 |
// Empty them all. |
|
1203 |
memset( msfpt->_in_rms, 0, sizeof(RegMask)*cnt ); |
|
1204 |
||
1205 |
// Do all the pre-defined non-Empty register masks |
|
1206 |
msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask; |
|
1207 |
msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask; |
|
1208 |
||
1209 |
// Place where the first outgoing argument can possibly be put. |
|
1210 |
OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots()); |
|
1211 |
assert( is_even(begin_out_arg_area), "" ); |
|
1212 |
// Compute max outgoing register number per call site. |
|
1213 |
OptoReg::Name out_arg_limit_per_call = begin_out_arg_area; |
|
1214 |
// Calls to C may hammer extra stack slots above and beyond any arguments. |
|
1215 |
// These are usually backing store for register arguments for varargs. |
|
1216 |
if( call != NULL && call->is_CallRuntime() ) |
|
1217 |
out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed()); |
|
1218 |
||
1219 |
||
1220 |
// Do the normal argument list (parameters) register masks |
|
1221 |
int argcnt = cnt - TypeFunc::Parms; |
|
1222 |
if( argcnt > 0 ) { // Skip it all if we have no args |
|
1223 |
BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt ); |
|
1224 |
VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt ); |
|
1225 |
int i; |
|
1226 |
for( i = 0; i < argcnt; i++ ) { |
|
1227 |
sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type(); |
|
1228 |
} |
|
1229 |
// V-call to pick proper calling convention |
|
1230 |
call->calling_convention( sig_bt, parm_regs, argcnt ); |
|
1231 |
||
1232 |
#ifdef ASSERT |
|
1233 |
// Sanity check users' calling convention. Really handy during |
|
1234 |
// the initial porting effort. Fairly expensive otherwise. |
|
1235 |
{ for (int i = 0; i<argcnt; i++) { |
|
1236 |
if( !parm_regs[i].first()->is_valid() && |
|
1237 |
!parm_regs[i].second()->is_valid() ) continue; |
|
1238 |
VMReg reg1 = parm_regs[i].first(); |
|
1239 |
VMReg reg2 = parm_regs[i].second(); |
|
1240 |
for (int j = 0; j < i; j++) { |
|
1241 |
if( !parm_regs[j].first()->is_valid() && |
|
1242 |
!parm_regs[j].second()->is_valid() ) continue; |
|
1243 |
VMReg reg3 = parm_regs[j].first(); |
|
1244 |
VMReg reg4 = parm_regs[j].second(); |
|
1245 |
if( !reg1->is_valid() ) { |
|
1246 |
assert( !reg2->is_valid(), "valid halvsies" ); |
|
1247 |
} else if( !reg3->is_valid() ) { |
|
1248 |
assert( !reg4->is_valid(), "valid halvsies" ); |
|
1249 |
} else { |
|
1250 |
assert( reg1 != reg2, "calling conv. must produce distinct regs"); |
|
1251 |
assert( reg1 != reg3, "calling conv. must produce distinct regs"); |
|
1252 |
assert( reg1 != reg4, "calling conv. must produce distinct regs"); |
|
1253 |
assert( reg2 != reg3, "calling conv. must produce distinct regs"); |
|
1254 |
assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs"); |
|
1255 |
assert( reg3 != reg4, "calling conv. must produce distinct regs"); |
|
1256 |
} |
|
1257 |
} |
|
1258 |
} |
|
1259 |
} |
|
1260 |
#endif |
|
1261 |
||
1262 |
// Visit each argument. Compute its outgoing register mask. |
|
1263 |
// Returned results can now occupy 2 mask bits (a value may span two slots). |
|
1264 |
// Compute max over all outgoing arguments both per call-site |
|
1265 |
// and over the entire method. |
|
1266 |
for( i = 0; i < argcnt; i++ ) { |
|
1267 |
// Address of incoming argument mask to fill in |
|
1268 |
RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms]; |
|
1269 |
if( !parm_regs[i].first()->is_valid() && |
|
1270 |
!parm_regs[i].second()->is_valid() ) { |
|
1271 |
continue; // Avoid Halves |
|
1272 |
} |
|
1273 |
// Grab first register, adjust stack slots and insert in mask. |
|
1274 |
OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call ); |
|
1275 |
if (OptoReg::is_valid(reg1)) |
|
1276 |
rm->Insert( reg1 ); |
|
1277 |
// Grab second register (if any), adjust stack slots and insert in mask. |
|
1278 |
OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call ); |
|
1279 |
if (OptoReg::is_valid(reg2)) |
|
1280 |
rm->Insert( reg2 ); |
|
1281 |
} // End of for all arguments |
|
1282 |
||
1283 |
// Compute number of stack slots needed to restore stack in case of |
|
1284 |
// Pascal-style argument popping. |
|
1285 |
mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area; |
|
1286 |
} |
|
1287 |
||
1288 |
// Compute the max stack slot killed by any call. These will not be |
|
1289 |
// available for debug info, and will be used to adjust FIRST_STACK_mask |
|
1290 |
// after all call sites have been visited. |
|
1291 |
if( _out_arg_limit < out_arg_limit_per_call) |
|
1292 |
_out_arg_limit = out_arg_limit_per_call; |
|
1293 |
||
1294 |
if (mcall) { |
|
1295 |
// Kill the outgoing argument area, including any non-argument holes and |
|
1296 |
// any legacy C-killed slots. Use Fat-Projections to do the killing. |
|
1297 |
// Since the max-per-method covers the max-per-call-site and debug info |
|
1298 |
// is excluded on the max-per-method basis, debug info cannot land in |
|
1299 |
// this killed area. |
|
1300 |
uint r_cnt = mcall->tf()->range()->cnt(); |
|
24923
9631f7d691dc
8034812: remove IDX_INIT macro hack in Node class
thartmann
parents:
24424
diff
changeset
|
1301 |
MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj ); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11429
diff
changeset
|
1302 |
if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) { |
39431
cb1b2538c4b2
8159720: Failure of C2 compilation with tiered prevents some C1 compilations.
cvarming
parents:
39419
diff
changeset
|
1303 |
C->record_method_not_compilable("unsupported outgoing calling sequence"); |
1 | 1304 |
} else { |
1305 |
for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++) |
|
1306 |
proj->_rout.Insert(OptoReg::Name(i)); |
|
1307 |
} |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1308 |
if (proj->_rout.is_NotEmpty()) { |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1309 |
push_projection(proj); |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1310 |
} |
1 | 1311 |
} |
1312 |
// Transfer the safepoint information from the call to the mcall |
|
1313 |
// Move the JVMState list |
|
1314 |
msfpt->set_jvms(sfpt->jvms()); |
|
1315 |
for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) { |
|
1316 |
jvms->set_map(sfpt); |
|
1317 |
} |
|
1318 |
||
1319 |
// Debug inputs begin just after the last incoming parameter |
|
22865
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1320 |
assert((mcall == NULL) || (mcall->jvms() == NULL) || |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1321 |
(mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), ""); |
1 | 1322 |
|
1323 |
// Move the OopMap |
|
1324 |
msfpt->_oop_map = sfpt->_oop_map; |
|
1325 |
||
22865
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1326 |
// Add additional edges. |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1327 |
if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) { |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1328 |
// For these calls we can not add MachConstantBase in expand(), as the |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1329 |
// ins are not complete then. |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1330 |
msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node()); |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1331 |
if (msfpt->jvms() && |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1332 |
msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) { |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1333 |
// We added an edge before jvms, so we must adapt the position of the ins. |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1334 |
msfpt->jvms()->adapt_position(+1); |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1335 |
} |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1336 |
} |
3b8857d7b3cc
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
goetz
parents:
22856
diff
changeset
|
1337 |
|
1 | 1338 |
// Registers killed by the call are set in the local scheduling pass |
1339 |
// of Global Code Motion. |
|
1340 |
return msfpt; |
|
1341 |
} |
|
1342 |
||
1343 |
//---------------------------match_tree---------------------------------------- |
|
1344 |
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce. Used as part |
|
1345 |
// of the wholesale conversion from Ideal to Mach Nodes. Also used for |
|
1346 |
// making GotoNodes while building the CFG and in init_spill_mask() to identify |
|
1347 |
// a Load's result RegMask for memoization in idealreg2regmask[] |
|
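// Usage example taken from this file: init_spill_mask() above calls
//   match_tree(new LoadINode(NULL, mem, fp, atp, TypeInt::INT, MemNode::unordered))
// solely to obtain a matched MachNode whose out_RegMask() describes where an
// int value may live on this platform (the memoization mentioned above).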
1348 |
MachNode *Matcher::match_tree( const Node *n ) { |
|
1349 |
assert( n->Opcode() != Op_Phi, "cannot match" ); |
|
1350 |
assert( !n->is_block_start(), "cannot match" ); |
|
1351 |
// Set the mark for all locally allocated State objects. |
|
1352 |
// When this call returns, the _states_arena arena will be reset |
|
1353 |
// freeing all State objects. |
|
1354 |
ResourceMark rm( &_states_arena ); |
|
1355 |
||
1356 |
LabelRootDepth = 0; |
|
1357 |
||
1358 |
// StoreNodes require their Memory input to match any LoadNodes |
|
1359 |
Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ; |
|
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1360 |
#ifdef ASSERT |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1361 |
Node* save_mem_node = _mem_node; |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1362 |
_mem_node = n->is_Store() ? (Node*)n : NULL; |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1363 |
#endif |
1 | 1364 |
// State object for root node of match tree |
1365 |
// Allocate it on _states_arena - stack allocation can cause stack overflow. |
|
1366 |
State *s = new (&_states_arena) State; |
|
1367 |
s->_kids[0] = NULL; |
|
1368 |
s->_kids[1] = NULL; |
|
1369 |
s->_leaf = (Node*)n; |
|
1370 |
// Label the input tree, allocating labels from top-level arena |
|
1371 |
Label_Root( n, s, n->in(0), mem ); |
|
1372 |
if (C->failing()) return NULL; |
|
1373 |
||
1374 |
// The minimum cost match for the whole tree is found at the root State |
|
1375 |
uint mincost = max_juint; |
|
1376 |
uint cost = max_juint; |
|
1377 |
uint i; |
|
1378 |
for( i = 0; i < NUM_OPERANDS; i++ ) { |
|
1379 |
if( s->valid(i) && // valid entry and |
|
1380 |
s->_cost[i] < cost && // low cost and |
|
1381 |
s->_rule[i] >= NUM_OPERANDS ) // not an operand |
|
1382 |
cost = s->_cost[mincost=i]; |
|
1383 |
} |
|
1384 |
if (mincost == max_juint) { |
|
1385 |
#ifndef PRODUCT |
|
1386 |
tty->print("No matching rule for:"); |
|
1387 |
s->dump(); |
|
1388 |
#endif |
|
1389 |
Matcher::soft_match_failure(); |
|
1390 |
return NULL; |
|
1391 |
} |
|
1392 |
// Reduce input tree based upon the state labels to machine Nodes |
|
1393 |
MachNode *m = ReduceInst( s, s->_rule[mincost], mem ); |
|
1394 |
#ifdef ASSERT |
|
1395 |
_old2new_map.map(n->_idx, m); |
|
768 | 1396 |
_new2old_map.map(m->_idx, (Node*)n); |
1 | 1397 |
#endif |
1398 |
||
1399 |
// Add any Matcher-ignored edges |
|
1400 |
uint cnt = n->req(); |
|
1401 |
uint start = 1; |
|
1402 |
if( mem != (Node*)1 ) start = MemNode::Memory+1; |
|
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1403 |
if( n->is_AddP() ) { |
1 | 1404 |
assert( mem == (Node*)1, "" ); |
1405 |
start = AddPNode::Base+1; |
|
1406 |
} |
|
1407 |
for( i = start; i < cnt; i++ ) { |
|
1408 |
if( !n->match_edge(i) ) { |
|
1409 |
if( i < m->req() ) |
|
1410 |
m->ins_req( i, n->in(i) ); |
|
1411 |
else |
|
1412 |
m->add_req( n->in(i) ); |
|
1413 |
} |
|
1414 |
} |
|
1415 |
||
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1416 |
debug_only( _mem_node = save_mem_node; ) |
1 | 1417 |
return m; |
1418 |
} |
|
1419 |
||
1420 |
||
1421 |
//------------------------------match_into_reg--------------------------------- |
|
1422 |
// Choose to either match this Node in a register or part of the current |
|
1423 |
// match tree. Return true for requiring a register and false for matching |
|
1424 |
// as part of the current match tree. |
|
1425 |
static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) { |
|
1426 |
||
1427 |
const Type *t = m->bottom_type(); |
|
1428 |
||
11429
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1429 |
if (t->singleton()) { |
1 | 1430 |
// Never force constants into registers. Allow them to match as |
1431 |
// constants or registers. Copies of the same value will share |
|
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1432 |
// the same register. See find_shared_node. |
1 | 1433 |
return false; |
1434 |
} else { // Not a constant |
|
1435 |
// Stop recursion if they have different Controls. |
|
11429
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1436 |
Node* m_control = m->in(0); |
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1437 |
// Control of load's memory can post-dominate load's control. |
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1438 |
// So use it since load can't float above its memory. |
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1439 |
Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL; |
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1440 |
if (control && m_control && control != m_control && control != mem_control) { |
1 | 1441 |
|
1442 |
// Actually, we can live with the most conservative control we |
|
1443 |
// find, if it post-dominates the others. This allows us to |
|
1444 |
// pick up load/op/store trees where the load can float a little |
|
1445 |
// above the store. |
|
1446 |
Node *x = control; |
|
11429
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1447 |
const uint max_scan = 6; // Arbitrary scan cutoff |
1 | 1448 |
uint j; |
11429
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1449 |
for (j=0; j<max_scan; j++) { |
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1450 |
if (x->is_Region()) // Bail out at merge points |
1 | 1451 |
return true; |
1452 |
x = x->in(0); |
|
11429
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1453 |
if (x == m_control) // Does 'control' post-dominate |
1 | 1454 |
break; // m->in(0)? If so, we can use it |
11429
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1455 |
if (x == mem_control) // Does 'control' post-dominate |
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1456 |
break; // mem_control? If so, we can use it |
1 | 1457 |
} |
11429
e894217a5d94
7121648: Use 3-operands SIMD instructions on x86 with AVX
kvn
parents:
10988
diff
changeset
|
1458 |
if (j == max_scan) // No post-domination before scan end? |
1 | 1459 |
return true; // Then break the match tree up |
1460 |
} |
|
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1461 |
if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) || |
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1462 |
(m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) { |
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
236
diff
changeset
|
1463 |
// These are commonly used in address expressions and can |
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1464 |
// efficiently fold into them on X64 in some cases. |
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1465 |
return false; |
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
236
diff
changeset
|
1466 |
} |
1 | 1467 |
} |
1468 |
||
2131 | 1469 |
// Not forceable cloning. If shared, put it into a register. |
1 | 1470 |
return shared; |
1471 |
} |
|
1472 |
||
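// Informal summary of the decision above (derived from the code, not a
// separate specification):
//   - constants / singleton types -> false (match in place; shared copies are
//     merged later by find_shared_node)
//   - DecodeN / DecodeNKlass when the port folds narrow-pointer math into
//     addressing modes -> false
//   - a control mismatch that is not post-dominated within the short scan
//     -> true (break the tree; the value must land in a register)
//   - otherwise -> 'shared' (multiple users force a register)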
1473 |
||
1474 |
//------------------------------Instruction Selection-------------------------- |
|
1475 |
// Label method walks a "tree" of nodes, using the ADLC generated DFA to match |
|
1476 |
// ideal nodes to machine instructions. Trees are delimited by shared Nodes, |
|
1477 |
// things the Matcher does not match (e.g., Memory), and things with different |
|
1478 |
// Controls (hence forced into different blocks). We pass in the Control |
|
1479 |
// selected for this entire State tree. |
|
1480 |
||
1481 |
// The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the |
|
1482 |
// Store and the Load must have identical Memories (as well as identical |
|
1483 |
// pointers). Since the Matcher does not have anything for Memory (and |
|
1484 |
// does not handle DAGs), I have to match the Memory input myself. If the |
|
1485 |
// Tree root is a Store, I require all Loads to have the identical memory. |
|
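// Worked example (hypothetical add-to-memory pattern): in
//   (StoreI mem addr (AddI (LoadI mem addr) con))
// the LoadI may be folded into the StoreI's match tree only because both see
// the same 'mem'; had the LoadI used a different memory state, the checks
// below would label it separately and it would be matched into its own
// register instead of into the memory form of the instruction.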
1486 |
Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){ |
|
1487 |
// Since Label_Root is a recursive function, it's possible that we might run |
|
1488 |
// out of stack space. See bugs 6272980 & 6227033 for more info. |
|
1489 |
LabelRootDepth++; |
|
1490 |
if (LabelRootDepth > MaxLabelRootDepth) { |
|
39431
cb1b2538c4b2
8159720: Failure of C2 compilation with tiered prevents some C1 compilations.
cvarming
parents:
39419
diff
changeset
|
1491 |
C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth"); |
1 | 1492 |
return NULL; |
1493 |
} |
|
1494 |
uint care = 0; // Edges matcher cares about |
|
1495 |
uint cnt = n->req(); |
|
1496 |
uint i = 0; |
|
1497 |
||
1498 |
// Examine children for memory state |
|
1499 |
// Can only subsume a child into your match-tree if that child's memory state |
|
1500 |
// is not modified along the path to another input. |
|
1501 |
// It is unsafe even if the other inputs are separate roots. |
|
1502 |
Node *input_mem = NULL; |
|
1503 |
for( i = 1; i < cnt; i++ ) { |
|
1504 |
if( !n->match_edge(i) ) continue; |
|
1505 |
Node *m = n->in(i); // Get ith input |
|
1506 |
assert( m, "expect non-null children" ); |
|
1507 |
if( m->is_Load() ) { |
|
1508 |
if( input_mem == NULL ) { |
|
1509 |
input_mem = m->in(MemNode::Memory); |
|
1510 |
} else if( input_mem != m->in(MemNode::Memory) ) { |
|
1511 |
input_mem = NodeSentinel; |
|
1512 |
} |
|
1513 |
} |
|
1514 |
} |
|
1515 |
||
1516 |
for( i = 1; i < cnt; i++ ){// For my children |
|
1517 |
if( !n->match_edge(i) ) continue; |
|
1518 |
Node *m = n->in(i); // Get ith input |
|
1519 |
// Allocate states out of a private arena |
|
1520 |
State *s = new (&_states_arena) State; |
|
1521 |
svec->_kids[care++] = s; |
|
1522 |
assert( care <= 2, "binary only for now" ); |
|
1523 |
||
1524 |
// Recursively label the State tree. |
|
1525 |
s->_kids[0] = NULL; |
|
1526 |
s->_kids[1] = NULL; |
|
1527 |
s->_leaf = m; |
|
1528 |
||
1529 |
// Check for leaves of the State Tree; things that cannot be a part of |
|
1530 |
// the current tree. If it finds any, that value is matched as a |
|
1531 |
// register operand. If not, then the normal matching is used. |
|
1532 |
if( match_into_reg(n, m, control, i, is_shared(m)) || |
|
1533 |
// |
|
1534 |
// Stop recursion if this is LoadNode and the root of this tree is a |
|
1535 |
// StoreNode and the load & store have different memories. |
|
1536 |
((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) || |
|
1537 |
// Can NOT include the match of a subtree when its memory state |
|
1538 |
// is used by any of the other subtrees |
|
1539 |
(input_mem == NodeSentinel) ) { |
|
1540 |
// Print when we exclude matching due to different memory states at input-loads |
|
34174
4db2fb26dc49
8140424: don't prefix developer and notproduct flag variables with CONST_ in product builds
twisti
parents:
33628
diff
changeset
|
1541 |
if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel) |
4db2fb26dc49
8140424: don't prefix developer and notproduct flag variables with CONST_ in product builds
twisti
parents:
33628
diff
changeset
|
1542 |
&& !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) { |
1 | 1543 |
tty->print_cr("invalid input_mem"); |
1544 |
} |
|
1545 |
// Switch to a register-only opcode; this value must be in a register |
|
1546 |
// and cannot be subsumed as part of a larger instruction. |
|
1547 |
s->DFA( m->ideal_reg(), m ); |
|
1548 |
||
1549 |
} else { |
|
1550 |
// If match tree has no control and we do, adopt it for entire tree |
|
1551 |
if( control == NULL && m->in(0) != NULL && m->req() > 1 ) |
|
1552 |
control = m->in(0); // Pick up control |
|
1553 |
// Else match as a normal part of the match tree. |
|
1554 |
control = Label_Root(m,s,control,mem); |
|
1555 |
if (C->failing()) return NULL; |
|
1556 |
} |
|
1557 |
} |
|
1558 |
||
1559 |
||
1560 |
// Call DFA to match this node, and return |
|
1561 |
svec->DFA( n->Opcode(), n ); |
|
1562 |
||
1563 |
#ifdef ASSERT |
|
1564 |
uint x; |
|
1565 |
for( x = 0; x < _LAST_MACH_OPER; x++ ) |
|
1566 |
if( svec->valid(x) ) |
|
1567 |
break; |
|
1568 |
||
1569 |
if (x >= _LAST_MACH_OPER) { |
|
1570 |
n->dump(); |
|
1571 |
svec->dump(); |
|
1572 |
assert( false, "bad AD file" ); |
|
1573 |
} |
|
1574 |
#endif |
|
1575 |
return control; |
|
1576 |
} |
|
1577 |
||
1578 |
||
1579 |
// Con nodes reduced using the same rule can share their MachNode |
|
1580 |
// which reduces the number of copies of a constant in the final |
|
1581 |
// program. The register allocator is free to split uses later to |
|
1582 |
// split live ranges. |
|
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1583 |
MachNode* Matcher::find_shared_node(Node* leaf, uint rule) { |
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1584 |
if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL; |
1 | 1585 |
|
1586 |
// See if this Con has already been reduced using this rule. |
|
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1587 |
if (_shared_nodes.Size() <= leaf->_idx) return NULL; |
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1588 |
MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx); |
1 | 1589 |
if (last != NULL && rule == last->rule()) { |
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1590 |
// Don't expect control change for DecodeN |
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1591 |
if (leaf->is_DecodeNarrowPtr()) |
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1592 |
return last; |
1 | 1593 |
// Get the new space root. |
1594 |
Node* xroot = new_node(C->root()); |
|
1595 |
if (xroot == NULL) { |
|
1596 |
// This shouldn't happen given the order of matching. |
|
1597 |
return NULL; |
|
1598 |
} |
|
1599 |
||
1600 |
// Shared constants need to have their control be root so they |
|
1601 |
// can be scheduled properly. |
|
1602 |
Node* control = last->in(0); |
|
1603 |
if (control != xroot) { |
|
1604 |
if (control == NULL || control == C->root()) { |
|
1605 |
last->set_req(0, xroot); |
|
1606 |
} else { |
|
1607 |
assert(false, "unexpected control"); |
|
1608 |
return NULL; |
|
1609 |
} |
|
1610 |
} |
|
1611 |
return last; |
|
1612 |
} |
|
1613 |
return NULL; |
|
1614 |
} |
|
1615 |
||
1616 |
||
1617 |
//------------------------------ReduceInst------------------------------------- |
|
1618 |
// Reduce a State tree (with given Control) into a tree of MachNodes. |
|
1619 |
// This routine (and its cohort ReduceOper) converts Ideal Nodes into |
|
1620 |
// complicated machine Nodes. Each MachNode covers some tree of Ideal Nodes. |
|
1621 |
// Each MachNode has a number of complicated MachOper operands; each |
|
1622 |
// MachOper also covers a further tree of Ideal Nodes. |
|
1623 |
||
1624 |
// The root of the Ideal match tree is always an instruction, so we enter |
|
1625 |
// the recursion here. After building the MachNode, we need to recurse |
|
1626 |
// the tree checking for these cases: |
|
1627 |
// (1) Child is an instruction - |
|
1628 |
// Build the instruction (recursively), add it as an edge. |
|
1629 |
// Build a simple operand (register) to hold the result of the instruction. |
|
1630 |
// (2) Child is an interior part of an instruction - |
|
1631 |
// Skip over it (do nothing) |
|
1632 |
// (3) Child is the start of an operand - |
|
1633 |
// Build the operand, place it inside the instruction |
|
1634 |
// Call ReduceOper. |
|
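// [Illustrative sketch, not HotSpot code] The shape of the recursion that
// ReduceInst/ReduceInst_Interior/ReduceOper implement can be pictured on a
// toy tree: a child that is itself an instruction is reduced recursively and
// added as an input; the interior of a complex operand is simply walked
// through; a leaf becomes an operand of the current machine node.
// Expr/ToyMach and their fields are invented for this sketch only.

#include <vector>

struct Expr {                                   // toy ideal node
  enum Kind { Leaf, Interior, Inst } kind;
  std::vector<Expr*> kids;
};

struct ToyMach {                                // toy machine node
  std::vector<ToyMach*> inputs;                 // reduced sub-instructions
  std::vector<Expr*>    operands;               // leaves folded into this node
};

static ToyMach* toy_reduce(Expr* e);

static void toy_reduce_kids(Expr* e, ToyMach* m) {
  for (Expr* k : e->kids) {
    switch (k->kind) {
    case Expr::Inst:     m->inputs.push_back(toy_reduce(k)); break; // case (1)
    case Expr::Interior: toy_reduce_kids(k, m);              break; // case (2)
    case Expr::Leaf:     m->operands.push_back(k);           break; // case (3)
    }
  }
}

static ToyMach* toy_reduce(Expr* e) {           // rough analogue of ReduceInst
  ToyMach* m = new ToyMach();                   // (ownership/cleanup omitted in this sketch)
  toy_reduce_kids(e, m);                        // rough analogue of ReduceInst_Interior
  return m;
}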
1635 |
MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) { |
|
1636 |
assert( rule >= NUM_OPERANDS, "called with operand rule" ); |
|
1637 |
||
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1638 |
MachNode* shared_node = find_shared_node(s->_leaf, rule); |
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1639 |
if (shared_node != NULL) { |
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1640 |
return shared_node; |
1 | 1641 |
} |
1642 |
||
1643 |
// Build the object to represent this state & prepare for recursive calls |
|
25930 | 1644 |
MachNode *mach = s->MachNodeGenerator(rule); |
1645 |
mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]); |
|
1 | 1646 |
assert( mach->_opnds[0] != NULL, "Missing result operand" ); |
1647 |
Node *leaf = s->_leaf; |
|
1648 |
// Check for instruction or instruction chain rule |
|
1649 |
if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) { |
|
1071
f331132bffdc
6732698: crash with dead code from compressed oops in gcm
never
parents:
781
diff
changeset
|
1650 |
assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf), |
f331132bffdc
6732698: crash with dead code from compressed oops in gcm
never
parents:
781
diff
changeset
|
1651 |
"duplicating node that's already been matched"); |
1 | 1652 |
// Instruction |
1653 |
mach->add_req( leaf->in(0) ); // Set initial control |
|
1654 |
// Reduce interior of complex instruction |
|
1655 |
ReduceInst_Interior( s, rule, mem, mach, 1 ); |
|
1656 |
} else { |
|
1657 |
// Instruction chain rules are data-dependent on their inputs |
|
1658 |
mach->add_req(0); // Set initial control to none |
|
1659 |
ReduceInst_Chain_Rule( s, rule, mem, mach ); |
|
1660 |
} |
|
1661 |
||
1662 |
// If a Memory was used, insert a Memory edge |
|
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1663 |
if( mem != (Node*)1 ) { |
1 | 1664 |
mach->ins_req(MemNode::Memory,mem); |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1665 |
#ifdef ASSERT |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1666 |
// Verify adr type after matching memory operation |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1667 |
const MachOper* oper = mach->memory_operand(); |
3268
f034e0c86895
6851742: (EA) allocation elimination doesn't work with UseG1GC
kvn
parents:
3176
diff
changeset
|
1668 |
if (oper != NULL && oper != (MachOper*)-1) { |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1669 |
// It has a unique memory operand. Find corresponding ideal mem node. |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1670 |
Node* m = NULL; |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1671 |
if (leaf->is_Mem()) { |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1672 |
m = leaf; |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1673 |
} else { |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1674 |
m = _mem_node; |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1675 |
assert(m != NULL && m->is_Mem(), "expecting memory node"); |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1676 |
} |
1400
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
1677 |
const Type* mach_at = mach->adr_type(); |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
1678 |
// A DecodeN node consumed by an address may have a different type |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
1679 |
// than its input. Don't compare types in that case. |
2254
f13dda645a4b
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
2131
diff
changeset
|
1680 |
if (m->adr_type() != mach_at && |
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1681 |
(m->in(MemNode::Address)->is_DecodeNarrowPtr() || |
2254
f13dda645a4b
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
2131
diff
changeset
|
1682 |
m->in(MemNode::Address)->is_AddP() && |
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1683 |
m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr() || |
2254
f13dda645a4b
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
2131
diff
changeset
|
1684 |
m->in(MemNode::Address)->is_AddP() && |
f13dda645a4b
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
2131
diff
changeset
|
1685 |
m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() && |
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1686 |
m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr())) { |
1400
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
1687 |
mach_at = m->adr_type(); |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
1688 |
} |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
1689 |
if (m->adr_type() != mach_at) { |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1690 |
m->dump(); |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1691 |
tty->print_cr("mach:"); |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1692 |
mach->dump(1); |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1693 |
} |
1400
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
1694 |
assert(m->adr_type() == mach_at, "matcher should not change adr type"); |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1695 |
} |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1696 |
#endif |
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1697 |
} |
1 | 1698 |
|
1699 |
// If the _leaf is an AddP, insert the base edge |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1700 |
if (leaf->is_AddP()) { |
1 | 1701 |
mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base)); |
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1702 |
} |
1 | 1703 |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1704 |
uint number_of_projections_prior = number_of_projections(); |
1 | 1705 |
|
1706 |
// Perform any 1-to-many expansions required |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1707 |
MachNode *ex = mach->Expand(s, _projection_list, mem); |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1708 |
if (ex != mach) { |
1 | 1709 |
assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match"); |
1710 |
if( ex->in(1)->is_Con() ) |
|
1711 |
ex->in(1)->set_req(0, C->root()); |
|
1712 |
// Remove old node from the graph |
|
1713 |
for( uint i=0; i<mach->req(); i++ ) { |
|
1714 |
mach->set_req(i,NULL); |
|
1715 |
} |
|
768 | 1716 |
#ifdef ASSERT |
1717 |
_new2old_map.map(ex->_idx, s->_leaf); |
|
1718 |
#endif |
|
1 | 1719 |
} |
1720 |
||
1721 |
// PhaseChaitin::fixup_spills will sometimes generate spill code |
|
1722 |
// via the matcher. By this time, nodes have been wired into the CFG, |
|
1723 |
// and any further nodes generated by expand rules will be left hanging |
|
1724 |
// in space, and will not get emitted as output code. Catch this. |
|
1725 |
// Also, catch any new register allocation constraints ("projections") |
|
1726 |
// generated belatedly during spill code generation. |
|
1727 |
if (_allocation_started) { |
|
1728 |
guarantee(ex == mach, "no expand rules during spill generation"); |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
18956
diff
changeset
|
1729 |
guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation"); |
1 | 1730 |
} |
1731 |
||
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
1732 |
if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) { |
1 | 1733 |
// Record the con for sharing |
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
1734 |
_shared_nodes.map(leaf->_idx, ex); |
1 | 1735 |
} |
1736 |
||
1737 |
return ex; |
|
1738 |
} |
|
1739 |
||
30300
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1740 |
void Matcher::handle_precedence_edges(Node* n, MachNode *mach) { |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1741 |
for (uint i = n->req(); i < n->len(); i++) { |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1742 |
if (n->in(i) != NULL) { |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1743 |
mach->add_prec(n->in(i)); |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1744 |
} |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1745 |
} |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1746 |
} |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1747 |
|
1 | 1748 |
void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) { |
1749 |
// 'op' is what I am expecting to receive |
|
1750 |
int op = _leftOp[rule]; |
|
1751 |
// Operand type to catch the child's result |
|
1752 |
// This is what my child will give me. |
|
1753 |
int opnd_class_instance = s->_rule[op]; |
|
1754 |
// Choose between operand class or not. |
|
2131 | 1755 |
// This is what I will receive. |
1 | 1756 |
int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op; |
1757 |
// New rule for child. Chase operand classes to get the actual rule. |
|
1758 |
int newrule = s->_rule[catch_op]; |
|
1759 |
||
1760 |
if( newrule < NUM_OPERANDS ) { |
|
1761 |
// Chain from operand or operand class, may be output of shared node |
|
1762 |
assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS, |
|
1763 |
"Bad AD file: Instruction chain rule must chain from operand"); |
|
1764 |
// Insert operand into array of operands for this instruction |
|
25930 | 1765 |
mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance); |
1 | 1766 |
|
1767 |
ReduceOper( s, newrule, mem, mach ); |
|
1768 |
} else { |
|
1769 |
// Chain from the result of an instruction |
|
1770 |
assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand"); |
|
25930 | 1771 |
mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]); |
1 | 1772 |
Node *mem1 = (Node*)1; |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1773 |
debug_only(Node *save_mem_node = _mem_node;) |
1 | 1774 |
mach->add_req( ReduceInst(s, newrule, mem1) ); |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1775 |
debug_only(_mem_node = save_mem_node;) |
1 | 1776 |
} |
1777 |
return; |
|
1778 |
} |
|
1779 |
||
1780 |
||
1781 |
uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) { |
|
30300
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1782 |
handle_precedence_edges(s->_leaf, mach); |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1783 |
|
1 | 1784 |
if( s->_leaf->is_Load() ) { |
1785 |
Node *mem2 = s->_leaf->in(MemNode::Memory); |
|
1786 |
assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" ); |
|
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1787 |
debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;) |
1 | 1788 |
mem = mem2; |
1789 |
} |
|
1790 |
if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) { |
|
1791 |
if( mach->in(0) == NULL ) |
|
1792 |
mach->set_req(0, s->_leaf->in(0)); |
|
1793 |
} |
|
1794 |
||
1795 |
// Now recursively walk the state tree & add operand list. |
|
1796 |
for( uint i=0; i<2; i++ ) { // binary tree |
|
1797 |
State *newstate = s->_kids[i]; |
|
1798 |
if( newstate == NULL ) break; // Might only have 1 child |
|
1799 |
// 'op' is what I am expecting to receive |
|
1800 |
int op; |
|
1801 |
if( i == 0 ) { |
|
1802 |
op = _leftOp[rule]; |
|
1803 |
} else { |
|
1804 |
op = _rightOp[rule]; |
|
1805 |
} |
|
1806 |
// Operand type to catch the child's result |
|
1807 |
// This is what my child will give me. |
|
1808 |
int opnd_class_instance = newstate->_rule[op]; |
|
1809 |
// Choose between operand class or not. |
|
1810 |
// This is what I will receive. |
|
1811 |
int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op; |
|
1812 |
// New rule for child. Chase operand classes to get the actual rule. |
|
1813 |
int newrule = newstate->_rule[catch_op]; |
|
1814 |
||
1815 |
if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction? |
|
1816 |
// Operand/operandClass |
|
1817 |
// Insert operand into array of operands for this instruction |
|
25930 | 1818 |
mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance); |
1 | 1819 |
ReduceOper( newstate, newrule, mem, mach ); |
1820 |
||
1821 |
} else { // Child is internal operand or new instruction |
|
1822 |
if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction? |
|
1823 |
// internal operand --> call ReduceInst_Interior |
|
1824 |
// Interior of complex instruction. Do nothing but recurse. |
|
1825 |
num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds ); |
|
1826 |
} else { |
|
1827 |
// instruction --> call build operand( ) to catch result |
|
1828 |
// --> ReduceInst( newrule ) |
|
25930 | 1829 |
mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]); |
1 | 1830 |
Node *mem1 = (Node*)1; |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1831 |
debug_only(Node *save_mem_node = _mem_node;) |
1 | 1832 |
mach->add_req( ReduceInst( newstate, newrule, mem1 ) ); |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1833 |
debug_only(_mem_node = save_mem_node;) |
1 | 1834 |
} |
1835 |
} |
|
1836 |
assert( mach->_opnds[num_opnds-1], "" ); |
|
1837 |
} |
|
1838 |
return num_opnds; |
|
1839 |
} |
|
1840 |
||
1841 |
// This routine walks the interior of possible complex operands. |
|
1842 |
// At each point we check our children in the match tree: |
|
1843 |
// (1) No children - |
|
1844 |
// We are a leaf; add _leaf field as an input to the MachNode |
|
1845 |
// (2) Child is an internal operand - |
|
1846 |
// Skip over it ( do nothing ) |
|
1847 |
// (3) Child is an instruction - |
|
1848 |
// Call ReduceInst recursively and |
|
1849 |
// add the instruction as an input to the MachNode |
|
1850 |
void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) { |
|
1851 |
assert( rule < _LAST_MACH_OPER, "called with operand rule" ); |
|
1852 |
State *kid = s->_kids[0]; |
|
1853 |
assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" ); |
|
1854 |
||
1855 |
// Leaf? And not subsumed? |
|
1856 |
if( kid == NULL && !_swallowed[rule] ) { |
|
1857 |
mach->add_req( s->_leaf ); // Add leaf pointer |
|
1858 |
return; // Bail out |
|
1859 |
} |
|
1860 |
||
1861 |
if( s->_leaf->is_Load() ) { |
|
1862 |
assert( mem == (Node*)1, "multiple Memories being matched at once?" ); |
|
1863 |
mem = s->_leaf->in(MemNode::Memory); |
|
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1864 |
debug_only(_mem_node = s->_leaf;) |
1 | 1865 |
} |
30300
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1866 |
|
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1867 |
handle_precedence_edges(s->_leaf, mach); |
4b12a5b40064
8069191: moving predicate out of loops may cause array accesses to bypass null check
roland
parents:
29083
diff
changeset
|
1868 |
|
1 | 1869 |
if( s->_leaf->in(0) && s->_leaf->req() > 1) { |
1870 |
if( !mach->in(0) ) |
|
1871 |
mach->set_req(0,s->_leaf->in(0)); |
|
1872 |
else { |
|
1873 |
assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" ); |
|
1874 |
} |
|
1875 |
} |
|
1876 |
||
1877 |
for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) { // binary tree |
|
1878 |
int newrule; |
|
18025 | 1879 |
if( i == 0) |
1 | 1880 |
newrule = kid->_rule[_leftOp[rule]]; |
1881 |
else |
|
1882 |
newrule = kid->_rule[_rightOp[rule]]; |
|
1883 |
||
1884 |
if( newrule < _LAST_MACH_OPER ) { // Operand or instruction? |
|
1885 |
// Internal operand; recurse but do nothing else |
|
1886 |
ReduceOper( kid, newrule, mem, mach ); |
|
1887 |
||
1888 |
} else { // Child is a new instruction |
|
1889 |
// Reduce the instruction, and add a direct pointer from this |
|
1890 |
// machine instruction to the newly reduced one. |
|
1891 |
Node *mem1 = (Node*)1; |
|
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1892 |
debug_only(Node *save_mem_node = _mem_node;) |
1 | 1893 |
mach->add_req( ReduceInst( kid, newrule, mem1 ) ); |
762
1b26adb5fea1
6715633: when matching a memory node the adr_type should not change
kvn
parents:
595
diff
changeset
|
1894 |
debug_only(_mem_node = save_mem_node;) |
1 | 1895 |
} |
1896 |
} |
|
1897 |
} |
|
1898 |
||
1899 |
||
1900 |
// ------------------------------------------------------------------------- |
|
1901 |
// Java-Java calling convention |
|
1902 |
// (what you use when Java calls Java) |
|
1903 |
||
1904 |
//------------------------------find_receiver---------------------------------- |
|
1905 |
// For a given signature, return the OptoReg for parameter 0. |
|
1906 |
OptoReg::Name Matcher::find_receiver( bool is_outgoing ) { |
|
1907 |
VMRegPair regs; |
|
1908 |
BasicType sig_bt = T_OBJECT; |
|
1909 |
calling_convention(&sig_bt, ®s, 1, is_outgoing); |
|
1910 |
// Return argument 0 register. In the LP64 build pointers |
|
1911 |
// take 2 registers, but the VM wants only the 'main' name. |
|
1912 |
return OptoReg::as_OptoReg(regs.first()); |
|
1913 |
} |
|
1914 |
||
23220
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1915 |
// This function identifies sub-graphs in which a 'load' node is |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1916 |
// input to two different nodes, and such that it can be matched |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1917 |
// with BMI instructions like blsi, blsr, etc. |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1918 |
// Example: b = -a[i] & a[i] can be matched to blsi r32, m32. |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1919 |
// The graph is (AndL (SubL Con0 LoadL*) LoadL*), where LoadL* |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1920 |
// refers to the same node. |
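// [Illustrative note, not HotSpot code] The three shapes accepted by
// is_bmi_pattern() below correspond to well-known scalar bit tricks; the
// blsi/blsr/blsmsk instructions themselves are selected later by the x86 AD
// rules that match these sub-trees. Shown here on unsigned 32-bit values to
// keep the arithmetic well defined (the long variants are analogous):

#include <cstdint>

static inline uint32_t lowest_set_bit(uint32_t x)    { return x & (0u - x); } // (AndI (SubI 0 x) x)  ~ blsi
static inline uint32_t clear_lowest_bit(uint32_t x)  { return x & (x - 1u); } // (AndI (AddI x -1) x) ~ blsr
static inline uint32_t mask_to_lowest_bit(uint32_t x){ return x ^ (x - 1u); } // (XorI (AddI x -1) x) ~ blsmsk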
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1921 |
#ifdef X86 |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1922 |
// Match the generic fused operations pattern (op1 (op2 Con{ConType} mop) mop) |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1923 |
// This is a temporary solution until we make DAGs expressible in ADL. |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1924 |
template<typename ConType> |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1925 |
class FusedPatternMatcher { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1926 |
Node* _op1_node; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1927 |
Node* _mop_node; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1928 |
int _con_op; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1929 |
|
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1930 |
static int match_next(Node* n, int next_op, int next_op_idx) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1931 |
if (n->in(1) == NULL || n->in(2) == NULL) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1932 |
return -1; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1933 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1934 |
|
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1935 |
if (next_op_idx == -1) { // n is commutative, try rotations |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1936 |
if (n->in(1)->Opcode() == next_op) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1937 |
return 1; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1938 |
} else if (n->in(2)->Opcode() == next_op) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1939 |
return 2; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1940 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1941 |
} else { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1942 |
assert(next_op_idx > 0 && next_op_idx <= 2, "Bad argument index"); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1943 |
if (n->in(next_op_idx)->Opcode() == next_op) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1944 |
return next_op_idx; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1945 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1946 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1947 |
return -1; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1948 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1949 |
public: |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1950 |
FusedPatternMatcher(Node* op1_node, Node *mop_node, int con_op) : |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1951 |
_op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { } |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1952 |
|
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1953 |
bool match(int op1, int op1_op2_idx, // op1 and the index of the op1->op2 edge, -1 if op1 is commutative |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1954 |
int op2, int op2_con_idx, // op2 and the index of the op2->con edge, -1 if op2 is commutative |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1955 |
typename ConType::NativeType con_value) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1956 |
if (_op1_node->Opcode() != op1) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1957 |
return false; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1958 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1959 |
if (_mop_node->outcnt() > 2) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1960 |
return false; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1961 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1962 |
op1_op2_idx = match_next(_op1_node, op2, op1_op2_idx); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1963 |
if (op1_op2_idx == -1) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1964 |
return false; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1965 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1966 |
// Memory operation must be the other edge |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1967 |
int op1_mop_idx = (op1_op2_idx & 1) + 1; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1968 |
|
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1969 |
// Check that the mop node is really what we want |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1970 |
if (_op1_node->in(op1_mop_idx) == _mop_node) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1971 |
Node *op2_node = _op1_node->in(op1_op2_idx); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1972 |
if (op2_node->outcnt() > 1) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1973 |
return false; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1974 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1975 |
assert(op2_node->Opcode() == op2, "Should be"); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1976 |
op2_con_idx = match_next(op2_node, _con_op, op2_con_idx); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1977 |
if (op2_con_idx == -1) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1978 |
return false; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1979 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1980 |
// Memory operation must be the other edge |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1981 |
int op2_mop_idx = (op2_con_idx & 1) + 1; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1982 |
// Check that the memory operation is the same node |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1983 |
if (op2_node->in(op2_mop_idx) == _mop_node) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1984 |
// Now check the constant |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1985 |
const Type* con_type = op2_node->in(op2_con_idx)->bottom_type(); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1986 |
if (con_type != Type::TOP && ConType::as_self(con_type)->get_con() == con_value) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1987 |
return true; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1988 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1989 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1990 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1991 |
return false; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1992 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1993 |
}; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1994 |
|
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1995 |
|
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1996 |
bool Matcher::is_bmi_pattern(Node *n, Node *m) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1997 |
if (n != NULL && m != NULL) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1998 |
if (m->Opcode() == Op_LoadI) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
1999 |
FusedPatternMatcher<TypeInt> bmii(n, m, Op_ConI); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2000 |
return bmii.match(Op_AndI, -1, Op_SubI, 1, 0) || |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2001 |
bmii.match(Op_AndI, -1, Op_AddI, -1, -1) || |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2002 |
bmii.match(Op_XorI, -1, Op_AddI, -1, -1); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2003 |
} else if (m->Opcode() == Op_LoadL) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2004 |
FusedPatternMatcher<TypeLong> bmil(n, m, Op_ConL); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2005 |
return bmil.match(Op_AndL, -1, Op_SubL, 1, 0) || |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2006 |
bmil.match(Op_AndL, -1, Op_AddL, -1, -1) || |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2007 |
bmil.match(Op_XorL, -1, Op_AddL, -1, -1); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2008 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2009 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2010 |
return false; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2011 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2012 |
#endif // X86 |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2013 |
|
38286
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2014 |
bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2015 |
Node *off = m->in(AddPNode::Offset); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2016 |
if (off->is_Con()) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2017 |
address_visited.test_set(m->_idx); // Flag as address_visited |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2018 |
mstack.push(m->in(AddPNode::Address), Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2019 |
// Clone X+offset as it also folds into most addressing expressions |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2020 |
mstack.push(off, Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2021 |
mstack.push(m->in(AddPNode::Base), Pre_Visit); |
33082
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2022 |
return true; |
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2023 |
} |
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2024 |
return false; |
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2025 |
} |
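// [Illustrative sketch, not HotSpot code] The decision made above can be
// summarized as: duplicate an address sub-expression into each memory access
// that uses it whenever re-materializing it is free because it folds into the
// addressing mode (base + constant offset here), rather than keeping one
// shared copy live in a register. A generic sketch of that cost test
// (AddrExpr and its field are invented for the sketch):

struct AddrExpr {
  bool offset_is_constant;   // (base + #con) folds into a [base, #imm] form
};

// True if each user should receive its own clone of the address expression.
static bool should_clone_into_users(const AddrExpr& a) {
  return a.offset_is_constant;   // folding makes the clone essentially free
}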
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2026 |
|
38286
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2027 |
// A method-klass-holder may be passed in the inline_cache_reg |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2028 |
// and then expanded into the inline_cache_reg and a method_oop register |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38033
diff
changeset
|
2029 |
// defined in ad_<arch>.cpp |
1 | 2030 |
|
2031 |
//------------------------------find_shared------------------------------------ |
|
2032 |
// Set bits if Node is shared or otherwise a root |
|
2033 |
void Matcher::find_shared( Node *n ) { |
|
33158
f4e6c593ba73
8137160: Use Compile::live_nodes instead of Compile::unique() in appropriate places -- followup
zmajo
parents:
33082
diff
changeset
|
2034 |
// Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc |
32202
7e7ad8b06f5b
8011858: Use Compile::live_nodes() instead of Compile::unique() in appropriate places
kvn
parents:
31035
diff
changeset
|
2035 |
MStack mstack(C->live_nodes() * 2); |
2112
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2036 |
// Mark nodes as address_visited if they are inputs to an address expression |
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2037 |
VectorSet address_visited(Thread::current()->resource_area()); |
1 | 2038 |
mstack.push(n, Visit); // Don't need to pre-visit root node |
2039 |
while (mstack.is_nonempty()) { |
|
2040 |
n = mstack.node(); // Leave node on stack |
|
2041 |
Node_State nstate = mstack.state(); |
|
2112
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2042 |
uint nop = n->Opcode(); |
1 | 2043 |
if (nstate == Pre_Visit) { |
2112
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2044 |
if (address_visited.test(n->_idx)) { // Visited in address already? |
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2045 |
// Flag as visited and shared now. |
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2046 |
set_visited(n); |
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2047 |
} |
1 | 2048 |
if (is_visited(n)) { // Visited already? |
2049 |
// Node is shared and has no reason to clone. Flag it as shared. |
|
2050 |
// This causes it to match into a register for the sharing. |
|
2051 |
set_shared(n); // Flag as shared and |
|
2052 |
mstack.pop(); // remove node from stack |
|
2053 |
continue; |
|
2054 |
} |
|
2055 |
nstate = Visit; // Not already visited; so visit now |
|
2056 |
} |
|
2057 |
if (nstate == Visit) { |
|
2058 |
mstack.set_state(Post_Visit); |
|
2059 |
set_visited(n); // Flag as visited now |
|
2060 |
bool mem_op = false; |
|
2061 |
||
2112
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2062 |
switch( nop ) { // Handle some opcodes special |
1 | 2063 |
case Op_Phi: // Treat Phis as shared roots |
2064 |
case Op_Parm: |
|
2065 |
case Op_Proj: // All handled specially during matching |
|
236
9a04268c8eea
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
1
diff
changeset
|
2066 |
case Op_SafePointScalarObject: |
1 | 2067 |
set_shared(n); |
2068 |
set_dontcare(n); |
|
2069 |
break; |
|
2070 |
case Op_If: |
|
2071 |
case Op_CountedLoopEnd: |
|
2072 |
mstack.set_state(Alt_Post_Visit); // Alternative way |
|
2073 |
// Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)). Helps |
|
2074 |
// with matching cmp/branch in 1 instruction. The Matcher needs the |
|
2075 |
// Bool and CmpX side-by-side, because it can only get at constants |
|
2076 |
// that are at the leaves of Match trees, and the Bool's condition acts |
|
2077 |
// as a constant here. |
|
2078 |
mstack.push(n->in(1), Visit); // Clone the Bool |
|
2079 |
mstack.push(n->in(0), Pre_Visit); // Visit control input |
|
2080 |
continue; // while (mstack.is_nonempty()) |
|
2081 |
case Op_ConvI2D: // These forms efficiently match with a prior |
|
2082 |
case Op_ConvI2F: // Load but not a following Store |
|
2083 |
if( n->in(1)->is_Load() && // Prior load |
|
2084 |
n->outcnt() == 1 && // Not already shared |
|
2085 |
n->unique_out()->is_Store() ) // Following store |
|
2086 |
set_shared(n); // Force it to be a root |
|
2087 |
break; |
|
2088 |
case Op_ReverseBytesI: |
|
2089 |
case Op_ReverseBytesL: |
|
2090 |
if( n->in(1)->is_Load() && // Prior load |
|
2091 |
n->outcnt() == 1 ) // Not already shared |
|
2092 |
set_shared(n); // Force it to be a root |
|
2093 |
break; |
|
2094 |
case Op_BoxLock: // Can't match until we get stack-regs in ADLC |
|
2095 |
case Op_IfFalse: |
|
2096 |
case Op_IfTrue: |
|
2097 |
case Op_MachProj: |
|
2098 |
case Op_MergeMem: |
|
2099 |
case Op_Catch: |
|
2100 |
case Op_CatchProj: |
|
2101 |
case Op_CProj: |
|
2102 |
case Op_JumpProj: |
|
2103 |
case Op_JProj: |
|
2104 |
case Op_NeverBranch: |
|
2105 |
set_dontcare(n); |
|
2106 |
break; |
|
2107 |
case Op_Jump: |
|
10988
a3b2bd43ef4f
7107042: assert(no_dead_loop) failed: dead loop detected
kvn
parents:
10518
diff
changeset
|
2108 |
mstack.push(n->in(1), Pre_Visit); // Switch Value (could be shared) |
1 | 2109 |
mstack.push(n->in(0), Pre_Visit); // Visit Control input |
2110 |
continue; // while (mstack.is_nonempty()) |
|
2111 |
case Op_StrComp: |
|
2348 | 2112 |
case Op_StrEquals: |
2113 |
case Op_StrIndexOf: |
|
33628 | 2114 |
case Op_StrIndexOfChar: |
595
a2be4c89de81
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
594
diff
changeset
|
2115 |
case Op_AryEq: |
33628 | 2116 |
case Op_HasNegatives: |
2117 |
case Op_StrInflatedCopy: |
|
2118 |
case Op_StrCompressedCopy: |
|
15242
695bb216be99
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
14623
diff
changeset
|
2119 |
case Op_EncodeISOArray: |
1 | 2120 |
set_shared(n); // Force result into register (it will be anyways) |
2121 |
break; |
|
2122 |
case Op_ConP: { // Convert pointers above the centerline to NULL |
|
2123 |
TypeNode *tn = n->as_Type(); // Constants derive from type nodes |
|
2124 |
const TypePtr* tp = tn->type()->is_ptr(); |
|
2125 |
if (tp->_ptr == TypePtr::AnyNull) { |
|
2126 |
tn->set_type(TypePtr::NULL_PTR); |
|
2127 |
} |
|
2128 |
break; |
|
2129 |
} |
|
589 | 2130 |
case Op_ConN: { // Convert narrow pointers above the centerline to NULL |
2131 |
TypeNode *tn = n->as_Type(); // Constants derive from type nodes |
|
767
64fb1fd7186d
6710487: More than half of JDI Regression tests hang with COOPs in -Xcomp mode
kvn
parents:
762
diff
changeset
|
2132 |
const TypePtr* tp = tn->type()->make_ptr(); |
64fb1fd7186d
6710487: More than half of JDI Regression tests hang with COOPs in -Xcomp mode
kvn
parents:
762
diff
changeset
|
2133 |
if (tp && tp->_ptr == TypePtr::AnyNull) { |
589 | 2134 |
tn->set_type(TypeNarrowOop::NULL_PTR); |
2135 |
} |
|
2136 |
break; |
|
2137 |
} |
|
1 | 2138 |
case Op_Binary: // These are introduced in the Post_Visit state. |
2139 |
ShouldNotReachHere(); |
|
2140 |
break; |
|
2141 |
case Op_ClearArray: |
|
2142 |
case Op_SafePoint: |
|
2143 |
mem_op = true; |
|
2144 |
break; |
|
4431
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2145 |
default: |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2146 |
if( n->is_Store() ) { |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2147 |
// Do match stores, despite no ideal reg |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2148 |
mem_op = true; |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2149 |
break; |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2150 |
} |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2151 |
if( n->is_Mem() ) { // Loads and LoadStores |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2152 |
mem_op = true; |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2153 |
// Loads must be root of match tree due to prior load conflict |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2154 |
if( C->subsume_loads() == false ) |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2155 |
set_shared(n); |
1 | 2156 |
} |
2157 |
// Fall into default case |
|
2158 |
if( !n->ideal_reg() ) |
|
2159 |
set_dontcare(n); // Unmatchable Nodes |
|
2160 |
} // end_switch |
|
2161 |
||
2162 |
for(int i = n->req() - 1; i >= 0; --i) { // For my children |
|
2163 |
Node *m = n->in(i); // Get ith input |
|
2164 |
if (m == NULL) continue; // Ignore NULLs |
|
2165 |
uint mop = m->Opcode(); |
|
2166 |
||
2167 |
// Must clone all producers of flags, or we will not match correctly. |
|
2168 |
// Suppose a compare setting int-flags is shared (e.g., a switch-tree) |
|
2169 |
// then it will match into an ideal Op_RegFlags. Alas, the fp-flags |
|
2170 |
// are also there, so we may match a float-branch to int-flags and |
|
2171 |
// expect the allocator to haul the flags from the int-side to the |
|
2172 |
// fp-side. No can do. |
|
2173 |
if( _must_clone[mop] ) { |
|
2174 |
mstack.push(m, Visit); |
|
2175 |
continue; // for(int i = ...) |
|
2176 |
} |
|
2177 |
||
13969
d2a189b83b87
7054512: Compress class pointers after perm gen removal
roland
parents:
13895
diff
changeset
|
2178 |
if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) { |
4431
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2179 |
// Bases used in addresses must be shared but since |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2180 |
// they are shared through a DecodeN they may appear |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2181 |
// to have a single use so force sharing here. |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2182 |
set_shared(m->in(AddPNode::Base)->in(1)); |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2183 |
} |
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2184 |
|
23220
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2185 |
// If 'n' and 'm' are part of a graph for a BMI instruction, clone this node. |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2186 |
#ifdef X86 |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2187 |
if (UseBMI1Instructions && is_bmi_pattern(n, m)) { |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2188 |
mstack.push(m, Visit); |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2189 |
continue; |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2190 |
} |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2191 |
#endif |
fc827339dc37
8031321: Support Intel bit manipulation instructions
iveresov
parents:
22911
diff
changeset
|
2192 |
|
4431
98ff8f025c55
6896370: CTW fails share/vm/opto/matcher.cpp:1475 "duplicating node that's already been matched"
kvn
parents:
3905
diff
changeset
|
2193 |
// Clone addressing expressions as they are "free" in memory access instructions |
33082
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2194 |
if (mem_op && i == MemNode::Address && mop == Op_AddP && |
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2195 |
// When there are other uses besides address expressions |
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2196 |
// put it on stack and mark as shared. |
c3e302e8e429
8136820: Generate better code for some Unsafe addressing patterns
roland
parents:
32202
diff
changeset
|
2197 |
!is_visited(m)) { |
2112
df46c83588fe
6791572: assert("duplicating node that's already been matched")
kvn
parents:
2022
diff
changeset
|
2198 |
          // Some inputs for address expression are not put on stack
          // to avoid marking them as shared and forcing them into register
          // if they are used only in address expressions.
          // But they should be marked as shared if there are other uses
          // besides address expressions.

          if (clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
            continue;
          }
        }   // if( mem_op &&
        mstack.push(m, Pre_Visit);
      }     // for(int i = ...)
    }
    else if (nstate == Alt_Post_Visit) {
      mstack.pop(); // Remove node from stack
      // We cannot remove the Cmp input from the Bool here, as the Bool may be
      // shared and all users of the Bool need to move the Cmp in parallel.
      // This leaves both the Bool and the If pointing at the Cmp. To
      // prevent the Matcher from trying to Match the Cmp along both paths,
      // BoolNode::match_edge always returns a zero.

      // We reorder the Op_If in a pre-order manner, so we can visit without
      // accidentally sharing the Cmp (the Bool and the If make 2 users).
      n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
    }
    else if (nstate == Post_Visit) {
      mstack.pop(); // Remove node from stack

      // Now hack a few special opcodes
      switch( n->Opcode() ) {       // Handle some opcodes special
      case Op_StorePConditional:
      case Op_StoreIConditional:
      case Op_StoreLConditional:
      case Op_CompareAndExchangeB:
      case Op_CompareAndExchangeS:
      case Op_CompareAndExchangeI:
      case Op_CompareAndExchangeL:
      case Op_CompareAndExchangeP:
      case Op_CompareAndExchangeN:
      case Op_WeakCompareAndSwapB:
      case Op_WeakCompareAndSwapS:
      case Op_WeakCompareAndSwapI:
      case Op_WeakCompareAndSwapL:
      case Op_WeakCompareAndSwapP:
      case Op_WeakCompareAndSwapN:
      case Op_CompareAndSwapB:
      case Op_CompareAndSwapS:
      case Op_CompareAndSwapI:
      case Op_CompareAndSwapL:
      case Op_CompareAndSwapP:
      case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
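        // Fuse the expected and new values into a single Binary operand so that
        // matching sees a binary tree: the memory address on one side and
        // Binary(oldval, newval) on the other.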
        Node *newval = n->in(MemNode::ValueIn );
        Node *oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
        Node *pair = new BinaryNode( oldval, newval );
        n->set_req(MemNode::ValueIn,pair);
        n->del_req(LoadStoreConditionalNode::ExpectedIn);
        break;
      }
      case Op_CMoveD:              // Convert trinary to binary-tree
      case Op_CMoveF:
      case Op_CMoveI:
      case Op_CMoveL:
      case Op_CMoveN:
      case Op_CMoveP:
      case Op_CMoveVD: {
        // Restructure into a binary tree for Matching. It's possible that
        // we could move this code up next to the graph reshaping for IfNodes
        // or vice-versa, but I do not want to debug this for Ladybird.
        // 10/2/2000 CNC.
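        // The shape after this block is CMove(Binary(Bool, Cmp), Binary(in(2), in(3))):
        // the Bool and its Cmp form one Binary operand, the two data inputs the other.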
        Node *pair1 = new BinaryNode(n->in(1),n->in(1)->in(1));
        n->set_req(1,pair1);
        Node *pair2 = new BinaryNode(n->in(2),n->in(3));
        n->set_req(2,pair2);
        n->del_req(3);
        break;
      }
      case Op_LoopLimit: {
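        // LoopLimit(init, limit, stride) is reshaped the same way: init and limit
        // are fused into one Binary operand and stride becomes the second input.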
        Node *pair1 = new BinaryNode(n->in(1),n->in(2));
        n->set_req(1,pair1);
        n->set_req(2,n->in(3));
        n->del_req(3);
        break;
      }
      case Op_StrEquals:
      case Op_StrIndexOfChar: {
        Node *pair1 = new BinaryNode(n->in(2),n->in(3));
        n->set_req(2,pair1);
        n->set_req(3,n->in(4));
        n->del_req(4);
        break;
      }
      case Op_StrComp:
      case Op_StrIndexOf: {
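        // These intrinsics take two (array, count) argument pairs after the memory
        // input; each pair is fused into its own Binary operand below.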
        Node *pair1 = new BinaryNode(n->in(2),n->in(3));
        n->set_req(2,pair1);
        Node *pair2 = new BinaryNode(n->in(4),n->in(5));
        n->set_req(3,pair2);
        n->del_req(5);
        n->del_req(4);
        break;
      }
      case Op_StrCompressedCopy:
      case Op_StrInflatedCopy:
      case Op_EncodeISOArray: {
        // Restructure into a binary tree for Matching.
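        // Inputs 3 and 4 are fused into a single Binary operand; input 2 is left in place.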
        Node* pair = new BinaryNode(n->in(3), n->in(4));
        n->set_req(3, pair);
        n->del_req(4);
        break;
      }
      default:
        break;
      }
    }
    else {
      ShouldNotReachHere();
    }
  } // end of while (mstack.is_nonempty())
}

#ifdef ASSERT
// machine-independent root to machine-dependent root
void Matcher::dump_old2new_map() {
  _old2new_map.dump();
}
#endif

//---------------------------collect_null_checks-------------------------------
// Find null checks in the ideal graph; write a machine-specific node for
// it. Used by later implicit-null-check handling. Actually collects
// either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
// value being tested.
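// Results are recorded as (projection, value) pairs in _null_check_tests; on
// 64-bit VMs the value may have its low bit set to mark a DecodeN that was
// unpinned here.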
void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
  Node *iff = proj->in(0);
  if( iff->Opcode() == Op_If ) {
    // During matching If's have Bool & Cmp side-by-side
    BoolNode *b = iff->in(1)->as_Bool();
    Node *cmp = iff->in(2);
    int opc = cmp->Opcode();
    if (opc != Op_CmpP && opc != Op_CmpN) return;

    const Type* ct = cmp->in(2)->bottom_type();
    if (ct == TypePtr::NULL_PTR ||
        (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {

      bool push_it = false;
      if( proj->Opcode() == Op_IfTrue ) {
#ifndef PRODUCT
        extern int all_null_checks_found;
        all_null_checks_found++;
#endif
        if( b->_test._test == BoolTest::ne ) {
          push_it = true;
        }
      } else {
        assert( proj->Opcode() == Op_IfFalse, "" );
        if( b->_test._test == BoolTest::eq ) {
          push_it = true;
        }
      }
      if( push_it ) {
        _null_check_tests.push(proj);
        Node* val = cmp->in(1);
#ifdef _LP64
        if (val->bottom_type()->isa_narrowoop() &&
            !Matcher::narrow_oop_use_complex_address()) {
          //
          // Look for DecodeN node which should be pinned to orig_proj.
          // On platforms (Sparc) which cannot handle 2 adds
          // in addressing mode we have to keep a DecodeN node and
          // use it to do implicit NULL check in address.
          //
          // DecodeN node was pinned to non-null path (orig_proj) during
          // CastPP transformation in final_graph_reshaping_impl().
          //
          uint cnt = orig_proj->outcnt();
          for (uint i = 0; i < orig_proj->outcnt(); i++) {
            Node* d = orig_proj->raw_out(i);
            if (d->is_DecodeN() && d->in(1) == val) {
              val = d;
              val->set_req(0, NULL); // Unpin now.
              // Mark this as special case to distinguish from
              // a regular case: CmpP(DecodeN, NULL).
              val = (Node*)(((intptr_t)val) | 1);
              break;
            }
          }
        }
#endif
        _null_check_tests.push(val);
      }
    }
  }
}

//---------------------------validate_null_checks------------------------------
// It's possible that the value being NULL checked is not the root of a match
// tree. If so, I cannot use the value in an implicit null check.
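// Entries come in (test, value) pairs; a set low bit on the value marks a DecodeN
// that was unpinned in collect_null_checks().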
void Matcher::validate_null_checks( ) {
  uint cnt = _null_check_tests.size();
  for( uint i=0; i < cnt; i+=2 ) {
    Node *test = _null_check_tests[i];
    Node *val = _null_check_tests[i+1];
    bool is_decoden = ((intptr_t)val) & 1;
    val = (Node*)(((intptr_t)val) & ~1);
    if (has_new_node(val)) {
      Node* new_val = new_node(val);
      if (is_decoden) {
        assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
        // Note: new_val may have a control edge if
        // the original ideal node DecodeN was matched before
        // it was unpinned in Matcher::collect_null_checks().
        // Unpin the mach node and mark it.
        new_val->set_req(0, NULL);
        new_val = (Node*)(((intptr_t)new_val) | 1);
      }
      // Is a match-tree root, so replace with the matched value
      _null_check_tests.map(i+1, new_val);
    } else {
      // Yank from candidate list
      _null_check_tests.map(i+1,_null_check_tests[--cnt]);
      _null_check_tests.map(i,_null_check_tests[--cnt]);
      _null_check_tests.pop();
      _null_check_tests.pop();
      i-=2;
    }
  }
}

// Used by the DFA in dfa_xxx.cpp. Check for a following barrier or
// atomic instruction that acts as a store-load barrier with no intervening
// volatile load; in that case we don't need a barrier here.
// We retain the Node to act as a compiler ordering barrier.
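// Starting from the membar's control projection, scan forward over its users:
// another store-load barrier or atomic update makes this one redundant, an
// acquire barrier or LoadFence means it must stay, and any other membar is
// checked recursively by pretending to be it.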
bool Matcher::post_store_load_barrier(const Node* vmb) {
  Compile* C = Compile::current();
  assert(vmb->is_MemBar(), "");
  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
  const MemBarNode* membar = vmb->as_MemBar();

  // Get the Ideal Proj node, ctrl, that can be used to iterate forward
  Node* ctrl = NULL;
  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
    Node* p = membar->fast_out(i);
    assert(p->is_Proj(), "only projections here");
    if ((p->as_Proj()->_con == TypeFunc::Control) &&
        !C->node_arena()->contains(p)) { // Unmatched old-space only
      ctrl = p;
      break;
    }
  }
  assert((ctrl != NULL), "missing control projection");

  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
    Node *x = ctrl->fast_out(j);
    int xop = x->Opcode();

    // We don't need the current barrier if we see another barrier or a lock
    // before seeing a volatile load.
    //
    // Op_Fastunlock previously appeared in the Op_* list below.
    // With the advent of 1-0 lock operations we're no longer guaranteed
    // that a monitor exit operation contains a serializing instruction.

    if (xop == Op_MemBarVolatile ||
        xop == Op_CompareAndExchangeB ||
        xop == Op_CompareAndExchangeS ||
        xop == Op_CompareAndExchangeI ||
        xop == Op_CompareAndExchangeL ||
        xop == Op_CompareAndExchangeP ||
        xop == Op_CompareAndExchangeN ||
        xop == Op_WeakCompareAndSwapB ||
        xop == Op_WeakCompareAndSwapS ||
        xop == Op_WeakCompareAndSwapL ||
        xop == Op_WeakCompareAndSwapP ||
        xop == Op_WeakCompareAndSwapN ||
        xop == Op_WeakCompareAndSwapI ||
        xop == Op_CompareAndSwapB ||
        xop == Op_CompareAndSwapS ||
        xop == Op_CompareAndSwapL ||
        xop == Op_CompareAndSwapP ||
        xop == Op_CompareAndSwapN ||
        xop == Op_CompareAndSwapI) {
      return true;
    }

    // Op_FastLock previously appeared in the Op_* list above.
    // With biased locking we're no longer guaranteed that a monitor
    // enter operation contains a serializing instruction.
    if ((xop == Op_FastLock) && !UseBiasedLocking) {
      return true;
    }

    if (x->is_MemBar()) {
      // We must retain this membar if there is an upcoming volatile
      // load, which will be followed by acquire membar.
      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
        return false;
      } else {
        // For other kinds of barriers, check by pretending we
        // are them, and seeing if we can be removed.
        return post_store_load_barrier(x->as_MemBar());
      }
    }

    // probably not necessary to check for these
    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
      return false;
    }
  }
  return false;
}

// Check whether node n is a branch to an uncommon trap that we could
// optimize as a test with very high branch costs in case of going to
// the uncommon trap. The code must be able to be recompiled to use
// a cheaper test.
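// The walk follows the IfFalse projection through at most a few Region nodes,
// looking for a call into the uncommon trap blob whose trap request names an
// allowed deopt reason with an action other than Action_none.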
bool Matcher::branches_to_uncommon_trap(const Node *n) {
  // Don't do it for natives, adapters, or runtime stubs
  Compile *C = Compile::current();
  if (!C->is_method_compilation()) return false;

  assert(n->is_If(), "You should only call this on if nodes.");
  IfNode *ifn = n->as_If();

  Node *ifFalse = NULL;
  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
    if (ifn->fast_out(i)->is_IfFalse()) {
      ifFalse = ifn->fast_out(i);
      break;
    }
  }
  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");

  Node *reg = ifFalse;
  int cnt = 4; // We must protect against cycles. Limit to 4 iterations.
  // Alternatively use visited set? Seems too expensive.
  while (reg != NULL && cnt > 0) {
    CallNode *call = NULL;
    RegionNode *nxt_reg = NULL;
    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
      Node *o = reg->fast_out(i);
      if (o->is_Call()) {
        call = o->as_Call();
      }
      if (o->is_Region()) {
        nxt_reg = o->as_Region();
      }
    }

    if (call &&
        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
      if (trtype->isa_int() && trtype->is_int()->is_con()) {
        jint tr_con = trtype->is_int()->get_con();
        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
        assert((int)reason < (int)BitsPerInt, "recode bit map");

        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
            && action != Deoptimization::Action_none) {
          // This uncommon trap is sure to recompile, eventually.
          // When that happens, C->too_many_traps will prevent
          // this transformation from happening again.
          return true;
        }
      }
    }

    reg = nxt_reg;
    cnt--;
  }

  return false;
}

//=============================================================================
//---------------------------State---------------------------------------------
State::State(void) {
#ifdef ASSERT
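  // Debug-only sentinels: _kids and _leaf are filled with recognizable bit
  // patterns so a State whose fields were never properly set (or that is used
  // after ~State below) stands out in a debugger.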
  _id = 0;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  //memset(_cost, -1, sizeof(_cost));
  //memset(_rule, -1, sizeof(_rule));
#endif
  memset(_valid, 0, sizeof(_valid));
}

#ifdef ASSERT
State::~State() {
  _id = 99;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  memset(_cost, -3, sizeof(_cost));
  memset(_rule, -3, sizeof(_rule));
}
#endif

#ifndef PRODUCT
//---------------------------dump----------------------------------------------
void State::dump() {
  tty->print("\n");
  dump(0);
}

void State::dump(int depth) {
  for( int j = 0; j < depth; j++ )
    tty->print(" ");
  tty->print("--N: ");
  _leaf->dump();
  uint i;
  for( i = 0; i < _LAST_MACH_OPER; i++ )
    // Check for valid entry
    if( valid(i) ) {
      for( int j = 0; j < depth; j++ )
        tty->print(" ");
      assert(_cost[i] != max_juint, "cost must be a valid value");
      assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
      tty->print_cr("%s %d %s",
                    ruleName[i], _cost[i], ruleName[_rule[i]] );
    }
  tty->cr();

  for( i=0; i<2; i++ )
    if( _kids[i] )
      _kids[i]->dump(depth+1);
}
#endif