/*
 * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class CmpNode;
class CountedLoopEndNode;
class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
class PhaseIdealLoop;
class VectorSet;
struct small_cache;

//
//                  I D E A L I Z E D   L O O P S
//
// Idealized loops are the set of loops I perform more interesting
// transformations on, beyond simple hoisting.

//------------------------------LoopNode---------------------------------------
// Simple loop header.  Fall in path on left, loop-back path on right.
class LoopNode : public RegionNode {
  // Size is bigger to hold the flags.  However, the flags do not change
  // the semantics, so they do not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  short _loop_flags;
  // Names for flag bitfields
  enum { pre_post_main=0, inner_loop=8, partial_peel_loop=16, partial_peel_failed=32 };
  char _unswitch_count;
  enum { _unswitch_max=3 };
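  // Summary of the _loop_flags layout (the Normal/Pre/Main/Post values and
  // Main_Has_No_Pre_Loop are defined in CountedLoopNode below):
  //   bits 1..0  pre/main/post loop kind (masked by PrePostFlagsMask)
  //   bit  2     Main_Has_No_Pre_Loop
  //   bit  3     inner_loop
  //   bit  4     partial_peel_loop
  //   bit  5     partial_peel_failed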
|
public:
  // Names for edge indices
  enum { Self=0, EntryControl, LoopBackControl };

  int is_inner_loop() const { return _loop_flags & inner_loop; }
  void set_inner_loop() { _loop_flags |= inner_loop; }

  int is_partial_peel_loop() const { return _loop_flags & partial_peel_loop; }
  void set_partial_peel_loop() { _loop_flags |= partial_peel_loop; }
  int partial_peel_has_failed() const { return _loop_flags & partial_peel_failed; }
  void mark_partial_peel_failed() { _loop_flags |= partial_peel_failed; }

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }
  void set_unswitch_count(int val) {
    assert(val <= unswitch_max(), "too many unswitches");
    _unswitch_count = val;
  }

  LoopNode( Node *entry, Node *backedge ) : RegionNode(3), _loop_flags(0), _unswitch_count(0) {
    init_class_id(Class_Loop);
    init_req(EntryControl, entry);
    init_req(LoopBackControl, backedge);
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  bool can_be_counted_loop(PhaseTransform* phase) const {
    return req() == 3 && in(0) != NULL &&
      in(1) != NULL && phase->type(in(1)) != Type::TOP &&
      in(2) != NULL && phase->type(in(2)) != Type::TOP;
  }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths).  The trip-counter exit is always
// last in the loop.  The trip-counter does not have to stride by a constant,
// but it does have to stride by a loop-invariant amount; the exit value is
// also loop invariant.

// CountedLoopNodes and CountedLoopEndNodes come in matched pairs.  The
// CountedLoopNode has the incoming loop control and the loop-back-control
// which is always the IfTrue before the matching CountedLoopEndNode.  The
// CountedLoopEndNode has an incoming control (possibly not the
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit.  The trip-counter value is always of
// the form (Op old-trip-counter stride).  The old-trip-counter is produced
// by a Phi connected to the CountedLoopNode.  The stride is loop invariant.
// The Op is any commutative opcode, including Add, Mul, Xor.  The
// CountedLoopEndNode also takes in the loop-invariant limit value.

// From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
// loop-back control.  From CountedLoopEndNodes I can reach CountedLoopNodes
// via the old-trip-counter from the Op node.

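// Illustrative example: the canonical source-level shape that matches this
// IR pattern (the names here are hypothetical):
//
//   for (int i = init; i < limit; i += stride) { ... }
//
// The Phi at the CountedLoopNode carries the old trip counter i, the AddI
// of (i, stride) is the (Op old-trip-counter stride) value, and the Bool
// wrapping (CmpI post-increment-i, limit) feeds the CountedLoopEndNode.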
//------------------------------CountedLoopNode--------------------------------
// CountedLoopNodes head simple counted loops.  CountedLoopNodes have as
// inputs the incoming loop-start control and the loop-back control, so they
// act like RegionNodes.  They also take in the initial trip counter, the
// loop-invariant stride and the loop-invariant limit value.  CountedLoopNodes
// produce a loop-body control and the trip counter value.  Since
// CountedLoopNodes behave like RegionNodes I still have a standard CFG model.

class CountedLoopNode : public LoopNode {
  // Size is bigger to hold _main_idx.  However, _main_idx does not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }

  // For Pre- and Post-loops during debugging ONLY, this holds the index of
  // the Main CountedLoop.  Used to assert that we understand the graph shape.
  node_idx_t _main_idx;

  // Known trip count calculated by policy_maximally_unroll
  int _trip_count;

  // Expected trip count from profile data
  float _profile_trip_cnt;

  // Log2 of original loop bodies in unrolled loop
  int _unrolled_count_log2;

  // Node count prior to last unrolling - used to decide if
  // unroll,optimize,unroll,optimize,... is making progress
  int _node_count_before_unroll;

public:
  CountedLoopNode( Node *entry, Node *backedge )
    : LoopNode(entry, backedge), _trip_count(max_jint),
      _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0),
      _node_count_before_unroll(0) {
    init_class_id(Class_CountedLoop);
    // Initialize _trip_count to the largest possible value.
    // Will be reset (lower) if the loop's trip count is known.
  }

  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  Node *init_control() const { return in(EntryControl); }
  Node *back_control() const { return in(LoopBackControl); }
  CountedLoopEndNode *loopexit() const;
  Node *init_trip() const;
  Node *stride() const;
  int stride_con() const;
  bool stride_is_con() const;
  Node *limit() const;
  Node *incr() const;
  Node *phi() const;

  // Match increment with optional truncation
  static Node* match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type);

  // A 'main' loop has a pre-loop and a post-loop.  The 'main' loop
  // can run short a few iterations and may start a few iterations in.
  // It will be RCE'd and unrolled and aligned.

  // A following 'post' loop will run any remaining iterations.  Used
  // during Range Check Elimination, the 'post' loop will do any final
  // iterations with full checks.  Also used by Loop Unrolling, where
  // the 'post' loop will do any epilog iterations needed.  Basically,
  // a 'post' loop cannot profitably be further unrolled or RCE'd.

  // A preceding 'pre' loop will run at least 1 iteration (to do peeling),
  // it may do under-flow checks for RCE and may do alignment iterations
  // so the following main loop 'knows' that it is striding down cache
  // lines.

  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // aligned, may be missing its pre-loop.
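  // Resulting loop sequence after iteration splitting (illustrative):
  //
  //    pre-loop   -- a few iterations with full checks; fixes alignment
  //       |
  //    main-loop  -- unrolled, range checks eliminated
  //       |
  //    post-loop  -- remaining epilog iterations with full checks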
|
  enum { Normal=0, Pre=1, Main=2, Post=3, PrePostFlagsMask=3, Main_Has_No_Pre_Loop=4 };
  int is_normal_loop() const { return (_loop_flags&PrePostFlagsMask) == Normal; }
  int is_pre_loop   () const { return (_loop_flags&PrePostFlagsMask) == Pre;    }
  int is_main_loop  () const { return (_loop_flags&PrePostFlagsMask) == Main;   }
  int is_post_loop  () const { return (_loop_flags&PrePostFlagsMask) == Post;   }
  int is_main_no_pre_loop() const { return _loop_flags & Main_Has_No_Pre_Loop; }
  void set_main_no_pre_loop() { _loop_flags |= Main_Has_No_Pre_Loop; }


  void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
  void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main; }
  void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
  void set_normal_loop(                    ) { _loop_flags &= ~PrePostFlagsMask; }

  void set_trip_count(int tc) { _trip_count = tc; }
  int trip_count() { return _trip_count; }

  void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
  float profile_trip_cnt() { return _profile_trip_cnt; }

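  // _unrolled_count_log2 counts doublings; unrolled_count() below caps the
  // shift exponent at BitsPerInt-3 so the computed count stays well clear
  // of 32-bit overflow no matter how often the loop has been unrolled.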
  void double_unrolled_count() { _unrolled_count_log2++; }
  int unrolled_count() { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }

  void set_node_count_before_unroll(int ct) { _node_count_before_unroll = ct; }
  int node_count_before_unroll() { return _node_count_before_unroll; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CountedLoopEndNode-----------------------------
// CountedLoopEndNodes end simple trip counted loops.  They act much like
// IfNodes.
class CountedLoopEndNode : public IfNode {
public:
  enum { TestControl, TestValue };

  CountedLoopEndNode( Node *control, Node *test, float prob, float cnt )
    : IfNode( control, test, prob, cnt) {
    init_class_id(Class_CountedLoopEnd);
  }
  virtual int Opcode() const;

  Node *cmp_node() const  { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : NULL; }
  Node *incr() const      { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
  Node *limit() const     { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
  Node *stride() const    { Node *tmp = incr();     return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
  Node *phi() const       { Node *tmp = incr();     return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
  Node *init_trip() const { Node *tmp = phi();      return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
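  // Illustrative graph shape navigated by the accessors above:
  //
  //   init_trip() --> Phi <----------------+
  //                    |                   |
  //    stride() ----> AddI  (incr())       |  back-edge value
  //                    |-------------------+
  //    limit() -----> CmpI  (cmp_node())
  //                    |
  //                   Bool  (in(TestValue))
  //                    |
  //           CountedLoopEndNode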
|
  int stride_con() const;
  bool stride_is_con() const { Node *tmp = stride(); return (tmp != NULL && tmp->is_Con()); }
  BoolTest::mask test_trip() const { return in(TestValue)->as_Bool()->_test._test; }
  CountedLoopNode *loopnode() const {
    Node *ln = phi()->in(0);
    assert( ln->Opcode() == Op_CountedLoop, "malformed loop" );
    return (CountedLoopNode*)ln;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
  Node *bc = back_control();
  if( bc == NULL ) return NULL;
  Node *le = bc->in(0);
  if( le->Opcode() != Op_CountedLoopEnd )
    return NULL;
  return (CountedLoopEndNode*)le;
}
inline Node *CountedLoopNode::init_trip() const { return loopexit() ? loopexit()->init_trip() : NULL; }
inline Node *CountedLoopNode::stride() const { return loopexit() ? loopexit()->stride() : NULL; }
inline int CountedLoopNode::stride_con() const { return loopexit() ? loopexit()->stride_con() : 0; }
inline bool CountedLoopNode::stride_is_con() const { return loopexit() && loopexit()->stride_is_con(); }
inline Node *CountedLoopNode::limit() const { return loopexit() ? loopexit()->limit() : NULL; }
inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; }
inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; }


// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
  IdealLoopTree *_parent;  // Parent in loop tree
  IdealLoopTree *_next;    // Next sibling in loop tree
  IdealLoopTree *_child;   // First child in loop tree

  // The head-tail backedge defines the loop.
  // If tail is NULL then this loop has multiple backedges as part of the
  // same loop.  During cleanup I'll peel off the multiple backedges; merge
  // them at the loop bottom and flow 1 real backedge into the loop.
  Node *_head;                  // Head of loop
  Node *_tail;                  // Tail of loop
  inline Node *tail();          // Handle lazy update of _tail field
  PhaseIdealLoop* _phase;

  Node_List _body;              // Loop body for inner loops

  uint8 _nest;                  // Nesting depth
  uint8 _irreducible:1,         // True if irreducible
        _has_call:1,            // True if has call safepoint
        _has_sfpt:1,            // True if has non-call safepoint
        _rce_candidate:1;       // True if candidate for range check elimination

  Node_List* _required_safept;  // An inner loop cannot delete these safepts;
  bool _allow_optimizations;    // Allow loop optimizations

  IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
    : _parent(0), _next(0), _child(0),
      _head(head), _tail(tail),
      _phase(phase),
      _required_safept(NULL),
      _allow_optimizations(true),
      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0)
  { }

  // Is 'l' a member of 'this'?
  int is_member( const IdealLoopTree *l ) const; // Test for nested membership

  // Set loop nesting depth.  Accumulate has_call bits.
  int set_nest( uint depth );

  // Split out multiple fall-in edges from the loop header.  Move them to a
  // private RegionNode before the loop.  This becomes the loop landing pad.
  void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );

  // Split out the outermost loop from this shared header.
  void split_outer_loop( PhaseIdealLoop *phase );

  // Merge all the backedges from the shared header into a private Region.
  // Feed that region as the one backedge to this loop.
  void merge_many_backedges( PhaseIdealLoop *phase );

  // Split shared headers and insert loop landing pads.
  // Insert a LoopNode to replace the RegionNode.
  // Returns TRUE if loop tree is structurally changed.
  bool beautify_loops( PhaseIdealLoop *phase );

  // Perform iteration-splitting on inner loops.  Split iterations to avoid
  // range checks or one-shot null checks.
  void iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

  // Driver for various flavors of iteration splitting
  void iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );

  // Given dominators, try to find loops with calls that must always be
  // executed (call dominates loop tail).  These loops do not need non-call
  // safepoints (ncsfpt).
  void check_safepts(VectorSet &visited, Node_List &stack);

  // Allpaths backwards scan from loop tail, terminating each path at first
  // safepoint encountered.
  void allpaths_check_safepts(VectorSet &visited, Node_List &stack);

  // Convert to counted loops where possible
  void counted_loop( PhaseIdealLoop *phase );

  // Check for Node being a loop-breaking test
  Node *is_loop_exit(Node *iff) const;

  // Returns true if ctrl is executed on every complete iteration
  bool dominates_backedge(Node* ctrl);

  // Remove simplistic dead code from loop body
  void DCE_loop_body();

  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
  // Replace with a 1-in-10 exit guess.
  void adjust_loop_exit_prob( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
  // Useful for unrolling loops with NO array accesses.
  bool policy_peel_only( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unswitched -- clone
  // loop with an invariant test
  bool policy_unswitching( PhaseIdealLoop *phase ) const;

  // Micro-benchmark spamming.  Remove empty loops.
  bool policy_do_remove_empty_loop( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
  // make some loop-invariant test (usually a null-check) happen before the
  // loop.
  bool policy_peeling( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be maximally unrolled.  Stash any
  // known trip count in the counted loop node.
  bool policy_maximally_unroll( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
  // the loop is a CountedLoop and the body is small enough.
  bool policy_unroll( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be range-check-eliminated.
  // Gather a list of IF tests that are dominated by iteration splitting;
  // also gather the end of the first split and the start of the 2nd split.
  bool policy_range_check( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be cache-line aligned.
  // Gather the expression that does the alignment.  Note that only
  // one array base can be aligned in a loop (unless the VM guarantees
  // mutual alignment).  Note that if we vectorize short memory ops
  // into longer memory ops, we may want to increase alignment.
  bool policy_align( PhaseIdealLoop *phase ) const;

  // Compute loop trip count from profile data
  void compute_profile_trip_cnt( PhaseIdealLoop *phase );

  // Reassociate invariant expressions.
  void reassociate_invariants(PhaseIdealLoop *phase);
  // Reassociate invariant add and subtract expressions.
  Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase);
  // Return nonzero index of invariant operand if invariant and variant
  // are combined with an Add or Sub.  Helper for reassociate_invariants.
  int is_invariant_addition(Node* n, PhaseIdealLoop *phase);

  // Return true if n is invariant
  bool is_invariant(Node* n) const;

  // Put loop body on igvn work list
  void record_for_igvn();

  bool is_loop()    { return !_irreducible && _tail && !_tail->is_top(); }
  bool is_inner()   { return is_loop() && _child == NULL; }
  bool is_counted() { return is_loop() && _head != NULL && _head->is_CountedLoop(); }

#ifndef PRODUCT
  void dump_head( ) const;    // Dump loop head only
  void dump() const;          // Dump this loop recursively
  void verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const;
#endif

};

// -----------------------------PhaseIdealLoop---------------------------------
// Computes the mapping from Nodes to IdealLoopTrees.  Organizes IdealLoopTrees
// into a loop tree.  Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
  friend class IdealLoopTree;
  friend class SuperWord;
  // Pre-computed def-use info
  PhaseIterGVN &_igvn;

  // Head of loop tree
  IdealLoopTree *_ltree_root;

  // Array of pre-order numbers, plus post-visited bit.
  // ZERO for not pre-visited.  EVEN for pre-visited but not post-visited.
  // ODD for post-visited.  Other bits are the pre-order number.
  uint *_preorders;
  uint _max_preorder;

  // Allocate _preorders[] array
  void allocate_preorders() {
    _max_preorder = C->unique()+8;
    _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Reallocate the _preorders[] array and clear it
  void reallocate_preorders() {
    if ( _max_preorder < C->unique() ) {
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique());
      _max_preorder = C->unique();
    }
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Check to grow _preorders[] array for the case when build_loop_tree_impl()
  // adds new nodes.
  void check_grow_preorders( ) {
    if ( _max_preorder < C->unique() ) {
      uint newsize = _max_preorder<<1;  // double size of array
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize);
      memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
      _max_preorder = newsize;
    }
  }
  // Check for pre-visited.  Zero for NOT visited; non-zero for visited.
  int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  // Pre-order numbers are written to the Nodes array as low-bit-set values.
  void set_preorder_visited( Node *n, int pre_order ) {
    assert( !is_visited( n ), "already set" );
    _preorders[n->_idx] = (pre_order<<1);
  }
  // Return pre-order number.
  int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }

  // Check for being post-visited.
  // Should be previsited already (checked with assert(is_visited(n))).
  int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }

  // Mark as post visited
  void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }

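  // Worked example of the encoding (illustrative): if node n is the 5th node
  // pre-visited, set_preorder_visited(n, 5) stores 5<<1 == 10 (EVEN);
  // set_postvisited(n) later sets the low bit, giving 11 (ODD); and
  // get_preorder(n) still recovers 11>>1 == 5.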
  // Set/get control node out.  Set lower bit to distinguish from IdealLoopTree.
  // Returns true if "n" is a data node, false if it's a control node.
  bool has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; }

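  // How the tagging works (illustrative): _nodes[] doubles as two maps.  For
  // a data node the slot holds (Node*)((intptr_t)ctrl + 1) -- bit 0 is
  // assumed free because Node objects are word aligned -- while for a
  // control node the slot holds a plain IdealLoopTree*.  Masking with ~1
  // recovers the control pointer; testing bit 0 distinguishes the cases.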
  // clear out dead code after build_loop_late
  Node_List _deadlist;

  // Support for faster execution of get_late_ctrl()/dom_lca()
  // when a node has many uses and dominator depth is deep.
  Node_Array _dom_lca_tags;
  void init_dom_lca_tags();
  void clear_dom_lca_tags();
  // Inline wrapper for frequent cases:
  // 1) only one use
  // 2) a use is the same as the current LCA passed as 'n1'
  Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
    assert( n->is_CFG(), "" );
    // Fast-path NULL lca
    if( lca != NULL && lca != n ) {
      assert( lca->is_CFG(), "" );
      // find LCA of all uses
      n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
    }
    return find_non_split_ctrl(n);
  }
  Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );
  // true if CFG node d dominates CFG node n
  bool is_dominator(Node *d, Node *n);

  // Helper function for directing control inputs away from CFG split
  // points.
  Node *find_non_split_ctrl( Node *ctrl ) const {
    if (ctrl != NULL) {
      if (ctrl->is_MultiBranch()) {
        ctrl = ctrl->in(0);
      }
      assert(ctrl->is_CFG(), "CFG");
    }
    return ctrl;
  }

public:
  bool has_node( Node* n ) const { return _nodes[n->_idx] != NULL; }
  // check if transform created new nodes that need _ctrl recorded
  Node *get_late_ctrl( Node *n, Node *early );
  Node *get_early_ctrl( Node *n );
  void set_early_ctrl( Node *n );
  void set_subtree_ctrl( Node *root );
  void set_ctrl( Node *n, Node *ctrl ) {
    assert( !has_node(n) || has_ctrl(n), "" );
    assert( ctrl->in(0), "cannot set dead control node" );
    assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
    _nodes.map( n->_idx, (Node*)((intptr_t)ctrl + 1) );
  }
  // Set control and update loop membership
  void set_ctrl_and_loop(Node* n, Node* ctrl) {
    IdealLoopTree* old_loop = get_loop(get_ctrl(n));
    IdealLoopTree* new_loop = get_loop(ctrl);
    if (old_loop != new_loop) {
      if (old_loop->_child == NULL) old_loop->_body.yank(n);
      if (new_loop->_child == NULL) new_loop->_body.push(n);
    }
    set_ctrl(n, ctrl);
  }
  // Control nodes can be replaced or subsumed.  During this pass they
  // get their replacement Node in slot 1.  Instead of updating the block
  // location of all Nodes in the subsumed block, we lazily do it.  As we
  // pull such a subsumed block out of the array, we write back the final
  // correct block.
  Node *get_ctrl( Node *i ) {
    assert(has_node(i), "");
    Node *n = get_ctrl_no_update(i);
    _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
    assert(has_node(i) && has_ctrl(i), "");
    assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
    return n;
  }

private:
  Node *get_ctrl_no_update( Node *i ) const {
    assert( has_ctrl(i), "" );
    Node *n = (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
    if (!n->in(0)) {
      // Skip dead CFG nodes
      do {
        n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      } while (!n->in(0));
      n = find_non_split_ctrl(n);
    }
    return n;
  }

  // Check for loop being set
  // "n" must be a control node.  Returns true if "n" is known to be in a loop.
  bool has_loop( Node *n ) const {
    assert(!has_node(n) || !has_ctrl(n), "");
    return has_node(n);
  }
  // Set loop
  void set_loop( Node *n, IdealLoopTree *loop ) {
    _nodes.map(n->_idx, (Node*)loop);
  }
  // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
  // the 'old_node' with 'new_node'.  Kill old_node.  Add a reference
  // from old_node to new_node to support the lazy update.  The reference
  // replaces the loop reference, since that is not needed for a dead node.
public:
  void lazy_update( Node *old_node, Node *new_node ) {
    assert( old_node != new_node, "no cycles please" );
    //old_node->set_req( 1, new_node /*NO DU INFO*/ );
    // Nodes always have DU info now, so re-use the side array slot
    // for this node to provide the forwarding pointer.
    _nodes.map( old_node->_idx, (Node*)((intptr_t)new_node + 1) );
  }
  void lazy_replace( Node *old_node, Node *new_node ) {
    _igvn.hash_delete(old_node);
    _igvn.subsume_node( old_node, new_node );
    lazy_update( old_node, new_node );
  }
  void lazy_replace_proj( Node *old_node, Node *new_node ) {
    assert( old_node->req() == 1, "use this for Projs" );
    _igvn.hash_delete(old_node); // Must hash-delete before hacking edges
    old_node->add_req( NULL );
    lazy_replace( old_node, new_node );
  }

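  // Forwarding sketch (illustrative): after lazy_replace(a, b) the slot
  // _nodes[a->_idx] holds a tagged pointer to b, so a later
  // get_ctrl_no_update() that lands on the dead node 'a' simply follows
  // the chain a -> b -> ... until it reaches a live CFG node
  // (one with in(0) != NULL).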
private:

  // Place 'n' in some loop nest, where 'n' is a CFG node
  void build_loop_tree();
  int build_loop_tree_impl( Node *n, int pre_order );
  // Insert loop into the existing loop tree.  'innermost' is a leaf of the
  // loop tree, not the root.
  IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );

  // Place Data nodes in some loop nest
  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me );
  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me );
  void build_loop_late_post ( Node* n, const PhaseIdealLoop *verify_me );

  // Array of immediate dominance info for each CFG node indexed by node idx
private:
  uint _idom_size;
  Node **_idom;                  // Array of immediate dominators
  uint *_dom_depth;              // Used for fast LCA test
  GrowableArray<uint>* _dom_stk; // For recomputation of dom depth

  Node* idom_no_update(Node* d) const {
    assert(d->_idx < _idom_size, "oob");
    Node* n = _idom[d->_idx];
    assert(n != NULL,"Bad immediate dominator info.");
    while (n->in(0) == NULL) {  // Skip dead CFG nodes
      //n = n->in(1);
      n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      assert(n != NULL,"Bad immediate dominator info.");
    }
    return n;
  }
  Node *idom(Node* d) const {
    uint didx = d->_idx;
    Node *n = idom_no_update(d);
    _idom[didx] = n;  // Lazily remove dead CFG nodes from table.
    return n;
  }
  uint dom_depth(Node* d) const {
    assert(d->_idx < _idom_size, "");
    return _dom_depth[d->_idx];
  }
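  // Illustrative sketch (hypothetical code, not necessarily the HotSpot
  // implementation) of the depth-based LCA walk that _idom/_dom_depth
  // make fast:
  //
  //   while (n1 != n2) {
  //     if (dom_depth(n1) >= dom_depth(n2)) n1 = idom(n1);
  //     else                                n2 = idom(n2);
  //   }
  //   return n1;   // least common ancestor in the dominator tree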
|
  void set_idom(Node* d, Node* n, uint dom_depth);
  // Locally compute IDOM using dom_lca call
  Node *compute_idom( Node *region ) const;
  // Recompute dom_depth
  void recompute_dom_depth();

  // Is safept not required by an outer loop?
  bool is_deleteable_safept(Node* sfpt);

public:
  // Dominators for the sea of nodes
  void Dominators();
  Node *dom_lca( Node *n1, Node *n2 ) const {
    return find_non_split_ctrl(dom_lca_internal(n1, n2));
  }
  Node *dom_lca_internal( Node *n1, Node *n2 ) const;

  // Compute the Ideal Node to Loop mapping
  PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me, bool do_split_ifs );

  // True if the method has at least 1 irreducible loop
  bool _has_irreducible_loops;

  // Per-Node transform
  virtual Node *transform( Node *a_node ) { return 0; }

  Node *is_counted_loop( Node *x, IdealLoopTree *loop );

  // Return a post-walked LoopNode
  IdealLoopTree *get_loop( Node *n ) const {
    // Dead nodes have no loop, so return the top level loop instead
    if (!has_node(n)) return _ltree_root;
    assert(!has_ctrl(n), "");
    return (IdealLoopTree*)_nodes[n->_idx];
  }

  // Is 'n' a (nested) member of 'loop'?
  int is_member( const IdealLoopTree *loop, Node *n ) const {
    return loop->is_member(get_loop(n)); }

  // This is the basic building block of the loop optimizations.  It clones an
  // entire loop body.  It makes an old_new loop body mapping; with this
  // mapping you can find the new-loop equivalent to an old-loop node.  All
  // new-loop nodes are exactly equal to their old-loop counterparts, all
  // edges are the same.  All exits from the old-loop now have a RegionNode
  // that merges the equivalent new-loop path.  This is true even for the
  // normal "loop-exit" condition.  All uses of loop-invariant old-loop values
  // now come from (one or more) Phis that merge their new-loop equivalents.
  // Parameter side_by_side_idom:
  //   When side_by_side_idom is NULL, the dominator tree is constructed for
  //   the clone loop to dominate the original.  Used in construction of
  //   pre-main-post loop sequence.
  //   When nonnull, the clone and original are side-by-side, both are
  //   dominated by the passed in side_by_side_idom node.  Used in
  //   construction of unswitched loops.
  void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
                   Node* side_by_side_idom = NULL);

  // If we got the effect of peeling, either by actually peeling or by
  // making a pre-loop which must execute at least once, we can remove
  // all loop-invariant dominated tests in the main body.
  void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );

  // Generate code to do a loop peel for the given loop (and body).
  // old_new is a temp array.
  void do_peeling( IdealLoopTree *loop, Node_List &old_new );

  // Add pre and post loops around the given loop.  These loops are used
  // during RCE, unrolling and aligning loops.
  void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in preheader_ctrl block and return that, otherwise return n.
  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n );

  // Take steps to maximally unroll the loop.  Peel any odd iterations, then
  // unroll to do double iterations.  The next round of major loop transforms
  // will repeat till the doubled loop body does all remaining iterations in 1
  // pass.
  void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );

  // Unroll the loop body one step - make each trip do 2 iterations.
  void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );

  // Return true if exp is a constant times an induction var
  bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);

  // Return true if exp is a scaled induction var plus (or minus) constant
  bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);

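  // Example (illustrative): for exp == (AddI (MulI iv 4) 8) one would
  // expect is_scaled_iv to report *p_scale == 4 for the MulI, and
  // is_scaled_iv_plus_offset to additionally return the ConI(8) node
  // through *p_offset.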
  // Eliminate range-checks and other trip-counter vs loop-invariant tests.
  void do_range_check( IdealLoopTree *loop, Node_List &old_new );

  // Create a slow version of the loop by cloning the loop
  // and inserting an if to select fast-slow versions.
  ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
                                        Node_List &old_new);

  // Clone loop with an invariant test (that does not exit) and
  // insert a clone of the test that selects which version to
  // execute.
  void do_unswitching (IdealLoopTree *loop, Node_List &old_new);

  // Find candidate "if" for unswitching
  IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;

  // Range Check Elimination uses this function!
  // Constrain the main loop iterations so the affine function:
  //    scale_con * I + offset  <  limit
  // always holds true.  That is, either increase the number of iterations in
  // the pre-loop or the post-loop until the condition holds true in the main
  // loop.  Scale_con, offset and limit are all loop invariant.
  void add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );

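  // Worked example (illustrative): with scale_con == 4 and offset == 2 the
  // guarded check is 4*I + 2 < limit, so the main loop may only run while
  // I < (limit - 2)/4; iterations outside that window are shifted into the
  // pre- or post-loop, where the full range checks remain.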
  // Partially peel loop up through last_peel node.
  bool partial_peel( IdealLoopTree *loop, Node_List &old_new );

  // Create a scheduled list of nodes control dependent on ctrl set.
  void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
  // Has a use in the vector set
  bool has_use_in_set( Node* n, VectorSet& vset );
  // Has use internal to the vector set (ie. not in a phi at the loop head)
  bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
  // clone "n" for uses that are outside of loop
  void clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
  // clone "n" for special uses that are in the not_peeled region
  void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                          VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
  // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
  void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
#ifdef ASSERT
  // Validate the loop partition sets: peel and not_peel
  bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
  // Ensure that uses outside of loop are of the right form
  bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                 uint orig_exit_idx, uint clone_exit_idx);
  bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx);
#endif

  // Returns nonzero constant stride if-node is a possible iv test (otherwise returns zero.)
  int stride_of_possible_iv( Node* iff );
  bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }
  // Return the (unique) control output node that's in the loop (if it exists.)
  Node* stay_in_loop( Node* n, IdealLoopTree *loop);
  // Insert a signed compare loop exit cloned from an unsigned compare.
  IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop);
  void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop);
  // Utility to register node "n" with PhaseIdealLoop
  void register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth);
  // Utility to create an if-projection
  ProjNode* proj_clone(ProjNode* p, IfNode* iff);
  // Force the iff control output to be the live_proj
  Node* short_circuit_if(IfNode* iff, ProjNode* live_proj);
  // Insert a region before an if projection
  RegionNode* insert_region_before_proj(ProjNode* proj);
  // Insert a new if before an if projection
  ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);

  // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
  // "Nearly" because all Nodes have been cloned from the original in the loop,
  // but the fall-in edges to the Cmp are different.  Clone Bool/Cmp pairs
  // through the Phi recursively, and return a Bool.
  BoolNode *clone_iff( PhiNode *phi, IdealLoopTree *loop );
  CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop );


  // Rework addressing expressions to get the most loop-invariant stuff
  // moved out.  We'd like to do all associative operators, but it's especially
  // important (common) to do address expressions.
  Node *remix_address_expressions( Node *n );

  // Attempt to use a conditional move instead of a phi/branch
  Node *conditional_move( Node *n );

  // Reorganize offset computations to lower register pressure.
  // Mostly prevent loop-fallout uses of the pre-incremented trip counter
  // (which are then alive with the post-incremented trip counter
  // forcing an extra register move).
  void reorg_offsets( IdealLoopTree *loop );

  // Check for aggressive application of the 'split-if' optimization,
  // using basic block level info.
  void split_if_with_blocks     ( VectorSet &visited, Node_Stack &nstack );
  Node *split_if_with_blocks_pre ( Node *n );
  void split_if_with_blocks_post( Node *n );
  Node *has_local_phi_input( Node *n );
  // Mark an IfNode as being dominated by a prior test,
  // without actually altering the CFG (and hence IDOM info).
  void dominated_by( Node *prevdom, Node *iff );

  // Split Node 'n' through merge point
  Node *split_thru_region( Node *n, Node *region );
  // Split Node 'n' through merge point if there is enough win.
  Node *split_thru_phi( Node *n, Node *region, int policy );
  // Found an If getting its condition-code input from a Phi in the
  // same block.  Split thru the Region.
  void do_split_if( Node *iff );

private:
  // Return a type based on condition control flow
  const TypeInt* filtered_type( Node *n, Node* n_ctrl);
  const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
  // Helpers for filtered type
  const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);

  // Helper functions
  void register_new_node( Node *n, Node *blk );
  Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
  Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
  void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
  bool split_up( Node *n, Node *blk1, Node *blk2 );
  void sink_use( Node *use, Node *post_loop );
  Node *place_near_use( Node *useblock ) const;

  bool _created_loop_node;
public:
  void set_created_loop_node() { _created_loop_node = true; }
  bool created_loop_node() { return _created_loop_node; }

#ifndef PRODUCT
  void dump( ) const;
  void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
  void verify() const;          // Major slow  :-)
  void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
  IdealLoopTree *get_loop_idx(Node* n) const {
    // Dead nodes have no loop, so return the top level loop instead
    return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root;
  }
  // Print some stats
  static void print_statistics();
  static int _loop_invokes;     // Count of PhaseIdealLoop invokes
  static int _loop_work;        // Sum of PhaseIdealLoop x _unique
#endif
};

inline Node* IdealLoopTree::tail() {
  // Handle lazy update of _tail field
  Node *n = _tail;
  //while( !n->in(0) )  // Skip dead CFG nodes
  //  n = n->in(1);
  if (n->in(0) == NULL)
    n = _phase->get_ctrl(n);
  _tail = n;
  return n;
}


// Iterate over the loop tree using a preorder, left-to-right traversal.
//
// Example that visits all counted loops from within PhaseIdealLoop
//
//   for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
//     IdealLoopTree* lpt = iter.current();
//     if (!lpt->is_counted()) continue;
//     ...
//   }
class LoopTreeIterator : public StackObj {
private:
  IdealLoopTree* _root;
  IdealLoopTree* _curnt;

public:
  LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}

  bool done() { return _curnt == NULL; }       // Finished iterating?

  void next();                                 // Advance to next loop tree

  IdealLoopTree* current() { return _curnt; }  // Return current value of iterator.
};