--- a/src/hotspot/share/opto/loopTransform.cpp Wed Apr 17 14:55:29 2019 +0200
+++ b/src/hotspot/share/opto/loopTransform.cpp Wed Apr 17 14:55:37 2019 +0200
@@ -45,13 +45,13 @@
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
- if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests
+ if (iff->outcnt() != 2) return NULL; // Ignore partially dead tests
PhaseIdealLoop *phase = _phase;
// Test is an IfNode, has 2 projections. If BOTH are in the loop
// we need loop unswitching instead of peeling.
- if( !is_member(phase->get_loop( iff->raw_out(0) )) )
+ if (!is_member(phase->get_loop(iff->raw_out(0))))
return iff->raw_out(0);
- if( !is_member(phase->get_loop( iff->raw_out(1) )) )
+ if (!is_member(phase->get_loop(iff->raw_out(1))))
return iff->raw_out(1);
return NULL;
}
@@ -63,7 +63,7 @@
//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
- for( uint i = 0; i < _body.size(); i++ ) {
+ for (uint i = 0; i < _body.size(); i++) {
Node *n = _body.at(i);
_phase->_igvn._worklist.push(n);
}
@@ -145,7 +145,9 @@
Node *exit = is_loop_exit(iff);
if (exit) {
float exit_prob = iff->_prob;
- if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
+ if (exit->Opcode() == Op_IfFalse) {
+ exit_prob = 1.0 - exit_prob;
+ }
if (exit_prob > PROB_MIN) {
float exit_cnt = iff->_fcnt * exit_prob;
return exit_cnt;
@@ -202,7 +204,7 @@
// Now compute a loop exit count
float loop_exit_cnt = 0.0f;
if (_child == NULL) {
- for( uint i = 0; i < _body.size(); i++ ) {
+ for (uint i = 0; i < _body.size(); i++) {
Node *n = _body[i];
loop_exit_cnt += compute_profile_trip_cnt_helper(n);
}
@@ -342,12 +344,13 @@
//------------------------------policy_peeling---------------------------------
// Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
-bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
- Node *test = ((IdealLoopTree*)this)->tail();
- int body_size = ((IdealLoopTree*)this)->_body.size();
+bool IdealLoopTree::policy_peeling(PhaseIdealLoop *phase) const {
+ IdealLoopTree *loop = (IdealLoopTree*)this;
+ Node *test = loop->tail();
+ int body_size = loop->_body.size();
// Peeling does loop cloning which can result in O(N^2) node construction
- if( body_size > 255 /* Prevent overflow for large body_size */
- || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
+ if (body_size > 255 /* Prevent overflow for large body_size */
+ || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit()) {
return false; // too large to safely clone
}
@@ -359,17 +362,21 @@
}
}
- while( test != _head ) { // Scan till run off top of loop
- if( test->is_If() ) { // Test?
+ while (test != _head) { // Scan till run off top of loop
+ if (test->is_If()) { // Test?
Node *ctrl = phase->get_ctrl(test->in(1));
- if (ctrl->is_top())
+ if (ctrl->is_top()) {
return false; // Found dead test on live IF? No peeling!
+ }
// Standard IF only has one input value to check for loop invariance
- assert(test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd || test->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
+ assert(test->Opcode() == Op_If ||
+ test->Opcode() == Op_CountedLoopEnd ||
+ test->Opcode() == Op_RangeCheck,
+ "Check this code when new subtype is added");
// Condition is not a member of this loop?
- if( !is_member(phase->get_loop(ctrl)) &&
- is_loop_exit(test) )
+ if (!is_member(phase->get_loop(ctrl)) && is_loop_exit(test)) {
return true; // Found reason to peel!
+ }
}
// Walk up dominators to loop _head looking for test which is
// executed on every path thru loop.
@@ -382,27 +389,27 @@
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
-void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
+void PhaseIdealLoop::peeled_dom_test_elim(IdealLoopTree *loop, Node_List &old_new) {
bool progress = true;
- while( progress ) {
+ while (progress) {
progress = false; // Reset for next iteration
Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
Node *test = prev->in(0);
- while( test != loop->_head ) { // Scan till run off top of loop
+ while (test != loop->_head) { // Scan till run off top of loop
int p_op = prev->Opcode();
- if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
+ if ((p_op == Op_IfFalse || p_op == Op_IfTrue) &&
test->is_If() && // Test?
!test->in(1)->is_Con() && // And not already obvious?
// Condition is not a member of this loop?
!loop->is_member(get_loop(get_ctrl(test->in(1))))){
// Walk loop body looking for instances of this test
- for( uint i = 0; i < loop->_body.size(); i++ ) {
+ for (uint i = 0; i < loop->_body.size(); i++) {
Node *n = loop->_body.at(i);
- if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
+ if (n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/) {
// IfNode was dominated by version in peeled loop body
progress = true;
- dominated_by( old_new[prev->_idx], n );
+ dominated_by(old_new[prev->_idx], n);
}
}
}
@@ -410,7 +417,7 @@
test = idom(test);
} // End of scan tests in loop
- } // End of while( progress )
+ } // End of while (progress)
}
//------------------------------do_peeling-------------------------------------
@@ -551,7 +558,7 @@
// v
// exit
//
-void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
+void PhaseIdealLoop::do_peeling(IdealLoopTree *loop, Node_List &old_new) {
C->set_major_progress();
// Peeling a 'main' loop in a pre/main/post situation obfuscates the
@@ -600,7 +607,7 @@
Node* old = head->fast_out(j);
if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
- if (!new_exit_value ) // Backedge value is ALSO loop invariant?
+ if (!new_exit_value) // Backedge value is ALSO loop invariant?
// Then loop body backedge value remains the same.
new_exit_value = old->in(LoopNode::LoopBackControl);
_igvn.hash_delete(old);
@@ -629,8 +636,9 @@
for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
Node *old = loop->_body.at(j3);
Node *nnn = old_new[old->_idx];
- if (!has_ctrl(nnn))
+ if (!has_ctrl(nnn)) {
set_idom(nnn, idom(nnn), dd-1);
+ }
}
// Now force out all loop-invariant dominating tests. The optimizer
@@ -645,12 +653,12 @@
//------------------------------policy_maximally_unroll------------------------
// Calculate exact loop trip count and return true if loop can be maximally
// unrolled.
-bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_maximally_unroll(PhaseIdealLoop *phase) const {
CountedLoopNode *cl = _head->as_CountedLoop();
assert(cl->is_normal_loop(), "");
- if (!cl->is_valid_counted_loop())
+ if (!cl->is_valid_counted_loop()) {
return false; // Malformed counted loop
-
+ }
if (!cl->has_exact_trip_count()) {
// Trip count is not exact.
return false;
@@ -666,7 +674,7 @@
// size. After all, it will no longer be a loop.
uint body_size = _body.size();
uint unroll_limit = (uint)LoopUnrollLimit * 4;
- assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
+ assert((intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
if (trip_count > unroll_limit || body_size > unroll_limit) {
return false;
}
@@ -918,14 +926,14 @@
// aligned in a loop (unless the VM guarantees mutual alignment). Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
-bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_align(PhaseIdealLoop *phase) const {
return false;
}
//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
-bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_range_check(PhaseIdealLoop *phase) const {
if (!RangeCheckElimination) return false;
CountedLoopNode *cl = _head->as_CountedLoop();
@@ -947,28 +955,32 @@
// Comparing trip+off vs limit
Node *bol = iff->in(1);
- if (bol->req() != 2) continue; // dead constant test
+ if (bol->req() != 2) {
+ continue; // dead constant test
+ }
if (!bol->is_Bool()) {
assert(bol->Opcode() == Op_Conv2B, "predicate check only");
continue;
}
- if (bol->as_Bool()->_test._test == BoolTest::ne)
+ if (bol->as_Bool()->_test._test == BoolTest::ne) {
continue; // not RC
-
+ }
Node *cmp = bol->in(1);
Node *rc_exp = cmp->in(1);
Node *limit = cmp->in(2);
Node *limit_c = phase->get_ctrl(limit);
- if( limit_c == phase->C->top() )
+ if (limit_c == phase->C->top()) {
return false; // Found dead test on live IF? No RCE!
- if( is_member(phase->get_loop(limit_c) ) ) {
+ }
+ if (is_member(phase->get_loop(limit_c))) {
// Compare might have operands swapped; commute them
rc_exp = cmp->in(2);
limit = cmp->in(1);
limit_c = phase->get_ctrl(limit);
- if( is_member(phase->get_loop(limit_c) ) )
+ if (is_member(phase->get_loop(limit_c))) {
continue; // Both inputs are loop varying; cannot RCE
+ }
}
if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
@@ -977,8 +989,9 @@
// Yeah! Found a test like 'trip+off vs limit'
// Test is an IfNode, has 2 projections. If BOTH are in the loop
// we need loop unswitching instead of iteration splitting.
- if( is_loop_exit(iff) )
+ if (is_loop_exit(iff)) {
return true; // Found reason to split iterations
+ }
} // End of is IF
}
@@ -988,16 +1001,17 @@
//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned. Useful
// for unrolling loops with NO array accesses.
-bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_peel_only(PhaseIdealLoop *phase) const {
// check for vectorized loops, any peeling done was already applied
if (_head->is_CountedLoop() && _head->as_CountedLoop()->is_unroll_only()) {
return false;
}
- for( uint i = 0; i < _body.size(); i++ )
- if( _body[i]->is_Mem() )
+ for (uint i = 0; i < _body.size(); i++) {
+ if (_body[i]->is_Mem()) {
return false;
-
+ }
+ }
// No memory accesses at all!
return true;
}
@@ -1005,33 +1019,31 @@
//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
-Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
- if( get_ctrl(n) != back_ctrl ) return n;
+Node *PhaseIdealLoop::clone_up_backedge_goo(Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones) {
+ if (get_ctrl(n) != back_ctrl) return n;
// Only visit once
if (visited.test_set(n->_idx)) {
Node *x = clones.find(n->_idx);
- if (x != NULL)
- return x;
- return n;
+ return (x != NULL) ? x : n;
}
Node *x = NULL; // If required, a clone of 'n'
// Check for 'n' being pinned in the backedge.
- if( n->in(0) && n->in(0) == back_ctrl ) {
+ if (n->in(0) && n->in(0) == back_ctrl) {
assert(clones.find(n->_idx) == NULL, "dead loop");
x = n->clone(); // Clone a copy of 'n' to preheader
clones.push(x, n->_idx);
- x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
+ x->set_req(0, preheader_ctrl); // Fix x's control input to preheader
}
// Recursive fixup any other input edges into x.
// If there are no changes we can just return 'n', otherwise
// we need to clone a private copy and change it.
- for( uint i = 1; i < n->req(); i++ ) {
- Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
- if( g != n->in(i) ) {
- if( !x ) {
+ for (uint i = 1; i < n->req(); i++) {
+ Node *g = clone_up_backedge_goo(back_ctrl, preheader_ctrl, n->in(i), visited, clones);
+ if (g != n->in(i)) {
+ if (!x) {
assert(clones.find(n->_idx) == NULL, "dead loop");
x = n->clone();
clones.push(x, n->_idx);
@@ -1039,11 +1051,11 @@
x->set_req(i, g);
}
}
- if( x ) { // x can legally float to pre-header location
- register_new_node( x, preheader_ctrl );
+ if (x) { // x can legally float to pre-header location
+ register_new_node(x, preheader_ctrl);
return x;
} else { // raise n to cover LCA of uses
- set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
+ set_ctrl(n, find_non_split_ctrl(back_ctrl->in(0)));
}
return n;
}
@@ -1247,7 +1259,7 @@
// Insert pre and post loops. If peel_only is set, the pre-loop can not have
// more iterations added. It acts as a 'peel' only, no lower-bound RCE, no
// alignment. Useful to unroll loops that do no array accesses.
-void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {
+void PhaseIdealLoop::insert_pre_post_loops(IdealLoopTree *loop, Node_List &old_new, bool peel_only) {
#ifndef PRODUCT
if (TraceLoopOpts) {
@@ -1262,9 +1274,9 @@
// Find common pieces of the loop being guarded with pre & post loops
CountedLoopNode *main_head = loop->_head->as_CountedLoop();
- assert( main_head->is_normal_loop(), "" );
+ assert(main_head->is_normal_loop(), "");
CountedLoopEndNode *main_end = main_head->loopexit();
- assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
+ assert(main_end->outcnt() == 2, "1 true, 1 false path only");
Node *pre_header= main_head->in(LoopNode::EntryControl);
Node *init = main_head->init_trip();
@@ -1276,13 +1288,13 @@
// Need only 1 user of 'bol' because I will be hacking the loop bounds.
Node *bol = main_end->in(CountedLoopEndNode::TestValue);
- if( bol->outcnt() != 1 ) {
+ if (bol->outcnt() != 1) {
bol = bol->clone();
register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
_igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol);
}
// Need only 1 user of 'cmp' because I will be hacking the loop bounds.
- if( cmp->outcnt() != 1 ) {
+ if (cmp->outcnt() != 1) {
cmp = cmp->clone();
register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
_igvn.replace_input_of(bol, 1, cmp);
@@ -1317,9 +1329,9 @@
// Find the pre-loop normal exit.
Node* pre_exit = pre_end->proj_out(false);
- assert( pre_exit->Opcode() == Op_IfFalse, "" );
+ assert(pre_exit->Opcode() == Op_IfFalse, "");
IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
- _igvn.register_new_node_with_optimizer( new_pre_exit );
+ _igvn.register_new_node_with_optimizer(new_pre_exit);
set_idom(new_pre_exit, pre_end, dd_main_head);
set_loop(new_pre_exit, outer_loop->_parent);
@@ -1328,26 +1340,26 @@
// zero-trip guard will become the minimum-trip guard when we unroll
// the main-loop.
Node *min_opaq = new Opaque1Node(C, limit);
- Node *min_cmp = new CmpINode( pre_incr, min_opaq );
- Node *min_bol = new BoolNode( min_cmp, b_test );
- register_new_node( min_opaq, new_pre_exit );
- register_new_node( min_cmp , new_pre_exit );
- register_new_node( min_bol , new_pre_exit );
+ Node *min_cmp = new CmpINode(pre_incr, min_opaq);
+ Node *min_bol = new BoolNode(min_cmp, b_test);
+ register_new_node(min_opaq, new_pre_exit);
+ register_new_node(min_cmp , new_pre_exit);
+ register_new_node(min_bol , new_pre_exit);
// Build the IfNode (assume the main-loop is executed always).
- IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
- _igvn.register_new_node_with_optimizer( min_iff );
+ IfNode *min_iff = new IfNode(new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN);
+ _igvn.register_new_node_with_optimizer(min_iff);
set_idom(min_iff, new_pre_exit, dd_main_head);
set_loop(min_iff, outer_loop->_parent);
// Plug in the false-path, taken if we need to skip main-loop
- _igvn.hash_delete( pre_exit );
+ _igvn.hash_delete(pre_exit);
pre_exit->set_req(0, min_iff);
set_idom(pre_exit, min_iff, dd_main_head);
set_idom(pre_exit->unique_ctrl_out(), min_iff, dd_main_head);
// Make the true-path, must enter the main loop
- Node *min_taken = new IfTrueNode( min_iff );
- _igvn.register_new_node_with_optimizer( min_taken );
+ Node *min_taken = new IfTrueNode(min_iff);
+ _igvn.register_new_node_with_optimizer(min_taken);
set_idom(min_taken, min_iff, dd_main_head);
set_loop(min_taken, outer_loop->_parent);
// Plug in the true path
@@ -1362,14 +1374,14 @@
// fall-out values of the pre-loop.
for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
Node* main_phi = main_head->fast_out(i2);
- if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
+ if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0) {
Node *pre_phi = old_new[main_phi->_idx];
Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
main_head->skip_strip_mined()->in(LoopNode::EntryControl),
pre_phi->in(LoopNode::LoopBackControl),
visited, clones);
_igvn.hash_delete(main_phi);
- main_phi->set_req( LoopNode::EntryControl, fallpre );
+ main_phi->set_req(LoopNode::EntryControl, fallpre);
}
}
@@ -1384,7 +1396,7 @@
// dependencies.
// CastII for the main loop:
- Node* castii = cast_incr_before_loop( pre_incr, min_taken, main_head );
+ Node* castii = cast_incr_before_loop(pre_incr, min_taken, main_head);
assert(castii != NULL, "no castII inserted");
Node* opaque_castii = new Opaque1Node(C, castii);
register_new_node(opaque_castii, outer_main_head->in(LoopNode::EntryControl));
@@ -1393,18 +1405,18 @@
// Step B4: Shorten the pre-loop to run only 1 iteration (for now).
// RCE and alignment may change this later.
Node *cmp_end = pre_end->cmp_node();
- assert( cmp_end->in(2) == limit, "" );
- Node *pre_limit = new AddINode( init, stride );
+ assert(cmp_end->in(2) == limit, "");
+ Node *pre_limit = new AddINode(init, stride);
// Save the original loop limit in this Opaque1 node for
// use by range check elimination.
Node *pre_opaq = new Opaque1Node(C, pre_limit, limit);
- register_new_node( pre_limit, pre_head->in(0) );
- register_new_node( pre_opaq , pre_head->in(0) );
+ register_new_node(pre_limit, pre_head->in(0));
+ register_new_node(pre_opaq , pre_head->in(0));
// Since no other users of pre-loop compare, I can hack limit directly
- assert( cmp_end->outcnt() == 1, "no other users" );
+ assert(cmp_end->outcnt() == 1, "no other users");
_igvn.hash_delete(cmp_end);
cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);
@@ -1424,24 +1436,26 @@
// Modify pre loop end condition
Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
- register_new_node( new_bol0, pre_head->in(0) );
+ register_new_node(new_bol0, pre_head->in(0));
_igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0);
// Modify main loop guard condition
assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
- register_new_node( new_bol1, new_pre_exit );
+ register_new_node(new_bol1, new_pre_exit);
_igvn.hash_delete(min_iff);
min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
// Modify main loop end condition
BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
- register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
+ register_new_node(new_bol2, main_end->in(CountedLoopEndNode::TestControl));
_igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2);
}
// Flag main loop
main_head->set_main_loop();
- if( peel_only ) main_head->set_main_no_pre_loop();
+ if (peel_only) {
+ main_head->set_main_no_pre_loop();
+ }
// Subtract a trip count for the pre-loop.
main_head->set_trip_count(main_head->trip_count() - 1);
@@ -1460,8 +1474,9 @@
//------------------------------insert_vector_post_loop------------------------
// Insert a copy of the atomic unrolled vectorized main loop as a post loop,
-// unroll_policy has already informed us that more unrolling is about to happen to
-// the main loop. The resultant post loop will serve as a vectorized drain loop.
+// unroll_policy has already informed us that more unrolling is about to
+// happen to the main loop. The resultant post loop will serve as a
+// vectorized drain loop.
void PhaseIdealLoop::insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new) {
if (!loop->_head->is_CountedLoop()) return;
@@ -1641,7 +1656,7 @@
// fall-out values of the main-loop.
for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
Node* main_phi = main_head->fast_out(i);
- if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0) {
+ if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0) {
Node *cur_phi = old_new[main_phi->_idx];
Node *fallnew = clone_up_backedge_goo(main_head->back_control(),
post_head->init_control(),
@@ -1713,7 +1728,7 @@
//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
-void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
+void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip) {
assert(LoopUnrollLimit, "");
CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
CountedLoopEndNode *loop_end = loop_head->loopexit();
@@ -1736,8 +1751,8 @@
Node_List rpo_list;
VectorSet visited(arena);
visited.set(loop_head->_idx);
- rpo( loop_head, stack, visited, rpo_list );
- dump(loop, rpo_list.size(), rpo_list );
+ rpo(loop_head, stack, visited, rpo_list);
+ dump(loop, rpo_list.size(), rpo_list);
}
#endif
@@ -1832,8 +1847,8 @@
// zero trip guard limit will be different from loop limit.
assert(has_ctrl(opaq), "should have it");
Node* opaq_ctrl = get_ctrl(opaq);
- limit = new Opaque2Node( C, limit );
- register_new_node( limit, opaq_ctrl );
+ limit = new Opaque2Node(C, limit);
+ register_new_node(limit, opaq_ctrl);
}
if ((stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo)) ||
(stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi))) {
@@ -1874,11 +1889,11 @@
adj_limit = new SubINode(limit, stride);
}
assert(old_limit != NULL && adj_limit != NULL, "");
- register_new_node( adj_limit, ctrl ); // adjust amount
+ register_new_node(adj_limit, ctrl); // adjust amount
Node* adj_cmp = new CmpINode(old_limit, adj_limit);
- register_new_node( adj_cmp, ctrl );
+ register_new_node(adj_cmp, ctrl);
Node* adj_bool = new BoolNode(adj_cmp, bt);
- register_new_node( adj_bool, ctrl );
+ register_new_node(adj_bool, ctrl);
new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
}
register_new_node(new_limit, ctrl);
@@ -1932,10 +1947,10 @@
// Make the fall-in from the original come from the fall-out of the clone.
for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
Node* phi = loop_head->fast_out(j);
- if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
+ if (phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0) {
Node *newphi = old_new[phi->_idx];
- _igvn.hash_delete( phi );
- _igvn.hash_delete( newphi );
+ _igvn.hash_delete(phi);
+ _igvn.hash_delete(newphi);
phi ->set_req(LoopNode:: EntryControl, newphi->in(LoopNode::LoopBackControl));
newphi->set_req(LoopNode::LoopBackControl, phi ->in(LoopNode::LoopBackControl));
@@ -1943,7 +1958,7 @@
}
}
Node *clone_head = old_new[loop_head->_idx];
- _igvn.hash_delete( clone_head );
+ _igvn.hash_delete(clone_head);
loop_head ->set_req(LoopNode:: EntryControl, clone_head->in(LoopNode::LoopBackControl));
clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
loop_head ->set_req(LoopNode::LoopBackControl, C->top());
@@ -1954,18 +1969,19 @@
// Kill the clone's backedge
Node *newcle = old_new[loop_end->_idx];
- _igvn.hash_delete( newcle );
+ _igvn.hash_delete(newcle);
Node *one = _igvn.intcon(1);
set_ctrl(one, C->root());
newcle->set_req(1, one);
// Force clone into same loop body
uint max = loop->_body.size();
- for( uint k = 0; k < max; k++ ) {
+ for (uint k = 0; k < max; k++) {
Node *old = loop->_body.at(k);
Node *nnn = old_new[old->_idx];
loop->_body.push(nnn);
- if (!has_ctrl(old))
+ if (!has_ctrl(old)) {
set_loop(nnn, loop);
+ }
}
loop->record_for_igvn();
@@ -1977,7 +1993,7 @@
for (uint i = 0; i < loop->_body.size(); i++) {
loop->_body.at(i)->dump();
}
- if(C->clone_map().is_debug()) {
+ if (C->clone_map().is_debug()) {
tty->print("\nCloneMap\n");
Dict* dict = C->clone_map().dict();
DictI i(dict);
@@ -1998,7 +2014,7 @@
//------------------------------do_maximally_unroll----------------------------
-void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
+void PhaseIdealLoop::do_maximally_unroll(IdealLoopTree *loop, Node_List &old_new) {
CountedLoopNode *cl = loop->_head->as_CountedLoop();
assert(cl->has_exact_trip_count(), "trip count is not exact");
assert(cl->trip_count() > 0, "");
@@ -2116,7 +2132,7 @@
// the pre-loop or the post-loop until the condition holds true in the main
// loop. Stride, scale, offset and limit are all loop invariant. Further,
// stride and scale are constants (offset and limit often are).
-void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
+void PhaseIdealLoop::add_constraint(int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit) {
// For positive stride, the pre-loop limit always uses a MAX function
// and the main loop a MIN function. For negative stride these are
// reversed.
@@ -2201,7 +2217,7 @@
set_ctrl(one, C->root());
Node *plus_one = new AddINode(offset, one);
- register_new_node( plus_one, pre_ctrl );
+ register_new_node(plus_one, pre_ctrl);
// Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
*pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl,
scale_con < -1 && stride_con > 0);
@@ -2370,7 +2386,7 @@
//------------------------------do_range_check---------------------------------
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
-int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
+int PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
#ifndef PRODUCT
if (PrintOpto && VerifyLoopOptimizations) {
tty->print("Range Check Elimination ");
@@ -2380,15 +2396,16 @@
loop->dump_head();
}
#endif
+
assert(RangeCheckElimination, "");
CountedLoopNode *cl = loop->_head->as_CountedLoop();
// If we fail before trying to eliminate range checks, set multiversion state
int closed_range_checks = 1;
// protect against stride not being a constant
- if (!cl->stride_is_con())
+ if (!cl->stride_is_con()) {
return closed_range_checks;
-
+ }
// Find the trip counter; we are iteration splitting based on it
Node *trip_counter = cl->phi();
// Find the main loop limit; we will trim it's iterations
@@ -2403,7 +2420,7 @@
}
// Need to find the main-loop zero-trip guard
- Node *ctrl = cl->skip_predicates();
+ Node *ctrl = cl->skip_predicates();
Node *iffm = ctrl->in(0);
Node *opqzm = iffm->in(1)->in(1)->in(2);
assert(opqzm->in(1) == main_limit, "do not understand situation");
@@ -2421,8 +2438,9 @@
// Occasionally it's possible for a pre-loop Opaque1 node to be
// optimized away and then another round of loop opts attempted.
// We can not optimize this particular loop in that case.
- if (pre_opaq1->Opcode() != Op_Opaque1)
+ if (pre_opaq1->Opcode() != Op_Opaque1) {
return closed_range_checks;
+ }
Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
Node *pre_limit = pre_opaq->in(1);
@@ -2432,9 +2450,9 @@
// Ensure the original loop limit is available from the
// pre-loop Opaque1 node.
Node *orig_limit = pre_opaq->original_loop_limit();
- if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
+ if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) {
return closed_range_checks;
-
+ }
// Must know if its a count-up or count-down loop
int stride_con = cl->stride_con();
@@ -2460,8 +2478,9 @@
Node* predicate_proj = cl->skip_strip_mined()->in(LoopNode::EntryControl);
assert(predicate_proj->is_Proj() && predicate_proj->in(0)->is_If(), "if projection only");
+
// Check loop body for tests of trip-counter plus loop-invariant vs loop-variant.
- for( uint i = 0; i < loop->_body.size(); i++ ) {
+ for (uint i = 0; i < loop->_body.size(); i++) {
Node *iff = loop->_body[i];
if (iff->Opcode() == Op_If ||
iff->Opcode() == Op_RangeCheck) { // Test?
@@ -2469,18 +2488,18 @@
// we need loop unswitching instead of iteration splitting.
closed_range_checks++;
Node *exit = loop->is_loop_exit(iff);
- if( !exit ) continue;
+ if (!exit) continue;
int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
// Get boolean condition to test
Node *i1 = iff->in(1);
- if( !i1->is_Bool() ) continue;
+ if (!i1->is_Bool()) continue;
BoolNode *bol = i1->as_Bool();
BoolTest b_test = bol->_test;
// Flip sense of test if exit condition is flipped
- if( flip )
+ if (flip) {
b_test = b_test.negate();
-
+ }
// Get compare
Node *cmp = bol->in(1);
@@ -2490,14 +2509,15 @@
int scale_con= 1; // Assume trip counter not scaled
Node *limit_c = get_ctrl(limit);
- if( loop->is_member(get_loop(limit_c) ) ) {
+ if (loop->is_member(get_loop(limit_c))) {
// Compare might have operands swapped; commute them
b_test = b_test.commute();
rc_exp = cmp->in(2);
limit = cmp->in(1);
limit_c = get_ctrl(limit);
- if( loop->is_member(get_loop(limit_c) ) )
+ if (loop->is_member(get_loop(limit_c))) {
continue; // Both inputs are loop varying; cannot RCE
+ }
}
// Here we know 'limit' is loop invariant
@@ -2516,8 +2536,9 @@
}
Node *offset_c = get_ctrl(offset);
- if( loop->is_member( get_loop(offset_c) ) )
+ if (loop->is_member(get_loop(offset_c))) {
continue; // Offset is not really loop invariant
+ }
// Here we know 'offset' is loop invariant.
// As above for the 'limit', the 'offset' maybe pinned below the
@@ -2539,10 +2560,10 @@
// sense of the test.
// Adjust pre and main loop limits to guard the correct iteration set
- if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
- if( b_test._test == BoolTest::lt ) { // Range checks always use lt
+ if (cmp->Opcode() == Op_CmpU) { // Unsigned compare is really 2 tests
+ if (b_test._test == BoolTest::lt) { // Range checks always use lt
// The underflow and overflow limits: 0 <= scale*I+offset < limit
- add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
+ add_constraint(stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit);
// (0-offset)/scale could be outside of loop iterations range.
conditional_rc = true;
Node* init = cl->init_trip();
@@ -2569,29 +2590,29 @@
continue; // In release mode, ignore it
}
} else { // Otherwise work on normal compares
- switch( b_test._test ) {
+      switch (b_test._test) {
case BoolTest::gt:
// Fall into GE case
case BoolTest::ge:
// Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit
scale_con = -scale_con;
- offset = new SubINode( zero, offset );
- register_new_node( offset, pre_ctrl );
- limit = new SubINode( zero, limit );
- register_new_node( limit, pre_ctrl );
+ offset = new SubINode(zero, offset);
+ register_new_node(offset, pre_ctrl);
+ limit = new SubINode(zero, limit);
+ register_new_node(limit, pre_ctrl);
// Fall into LE case
case BoolTest::le:
if (b_test._test != BoolTest::gt) {
// Convert X <= Y to X < Y+1
- limit = new AddINode( limit, one );
- register_new_node( limit, pre_ctrl );
+ limit = new AddINode(limit, one);
+ register_new_node(limit, pre_ctrl);
}
// Fall into LT case
case BoolTest::lt:
// The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
// Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
// to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT.
- add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
+ add_constraint(stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit);
// ((MIN_INT+1)-offset)/scale could be outside of loop iterations range.
// Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could
// still be outside of loop range.
@@ -2607,7 +2628,7 @@
// Kill the eliminated test
C->set_major_progress();
- Node *kill_con = _igvn.intcon( 1-flip );
+ Node *kill_con = _igvn.intcon(1-flip);
set_ctrl(kill_con, C->root());
_igvn.replace_input_of(iff, 1, kill_con);
// Find surviving projection
@@ -2627,9 +2648,7 @@
if (limit->Opcode() == Op_LoadRange) {
closed_range_checks--;
}
-
} // End of is IF
-
}
if (predicate_proj != cl->skip_strip_mined()->in(LoopNode::EntryControl)) {
_igvn.replace_input_of(cl->skip_strip_mined(), LoopNode::EntryControl, predicate_proj);
@@ -2650,21 +2669,21 @@
Node *main_cle = cl->loopexit();
Node *main_bol = main_cle->in(1);
// Hacking loop bounds; need private copies of exit test
- if( main_bol->outcnt() > 1 ) {// BoolNode shared?
- main_bol = main_bol->clone();// Clone a private BoolNode
- register_new_node( main_bol, main_cle->in(0) );
+ if (main_bol->outcnt() > 1) { // BoolNode shared?
+ main_bol = main_bol->clone(); // Clone a private BoolNode
+ register_new_node(main_bol, main_cle->in(0));
_igvn.replace_input_of(main_cle, 1, main_bol);
}
Node *main_cmp = main_bol->in(1);
- if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
- main_cmp = main_cmp->clone();// Clone a private CmpNode
- register_new_node( main_cmp, main_cle->in(0) );
+ if (main_cmp->outcnt() > 1) { // CmpNode shared?
+ main_cmp = main_cmp->clone(); // Clone a private CmpNode
+ register_new_node(main_cmp, main_cle->in(0));
_igvn.replace_input_of(main_bol, 1, main_cmp);
}
// Hack the now-private loop bounds
_igvn.replace_input_of(main_cmp, 2, main_limit);
// The OpaqueNode is unshared by design
- assert( opqzm->outcnt() == 1, "cannot hack shared node" );
+ assert(opqzm->outcnt() == 1, "cannot hack shared node");
_igvn.replace_input_of(opqzm, 1, main_limit);
return closed_range_checks;
@@ -2833,64 +2852,67 @@
//------------------------------DCE_loop_body----------------------------------
// Remove simplistic dead code from loop body
void IdealLoopTree::DCE_loop_body() {
- for( uint i = 0; i < _body.size(); i++ )
- if( _body.at(i)->outcnt() == 0 )
- _body.map( i--, _body.pop() );
+ for (uint i = 0; i < _body.size(); i++) {
+ if (_body.at(i)->outcnt() == 0) {
+ _body.map(i, _body.pop());
+ i--; // Ensure we revisit the updated index.
+ }
+ }
}
//------------------------------adjust_loop_exit_prob--------------------------
// Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
// Replace with a 1-in-10 exit guess.
-void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
+void IdealLoopTree::adjust_loop_exit_prob(PhaseIdealLoop *phase) {
Node *test = tail();
- while( test != _head ) {
+ while (test != _head) {
uint top = test->Opcode();
- if( top == Op_IfTrue || top == Op_IfFalse ) {
+ if (top == Op_IfTrue || top == Op_IfFalse) {
int test_con = ((ProjNode*)test)->_con;
assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
IfNode *iff = test->in(0)->as_If();
- if( iff->outcnt() == 2 ) { // Ignore dead tests
+ if (iff->outcnt() == 2) { // Ignore dead tests
Node *bol = iff->in(1);
- if( bol && bol->req() > 1 && bol->in(1) &&
- ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
- (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
- (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeB ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeS ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeI ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeL ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeP ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeN ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapB ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapS ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapI ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapL ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapP ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapN ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapB ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapS ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapN ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeP ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeN ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapP ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapN ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapP ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapN )))
+ if (bol && bol->req() > 1 && bol->in(1) &&
+ ((bol->in(1)->Opcode() == Op_StorePConditional) ||
+ (bol->in(1)->Opcode() == Op_StoreIConditional) ||
+ (bol->in(1)->Opcode() == Op_StoreLConditional) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeB) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeS) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeI) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeL) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeP) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeN) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapB) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapS) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapI) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapL) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapP) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapN) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapB) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapS) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapI) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapL) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapP) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapN) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeP) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeN) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapP) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapN) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapP) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapN)))
return; // Allocation loops RARELY take backedge
// Find the OTHER exit path from the IF
Node* ex = iff->proj_out(1-test_con);
float p = iff->_prob;
- if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
- if( top == Op_IfTrue ) {
- if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
+ if (!phase->is_member(this, ex) && iff->_fcnt == COUNT_UNKNOWN) {
+ if (top == Op_IfTrue) {
+ if (p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
iff->_prob = PROB_STATIC_FREQUENT;
}
} else {
- if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
+ if (p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
iff->_prob = PROB_STATIC_INFREQUENT;
}
}
@@ -2958,20 +2980,22 @@
// the loop.
bool IdealLoopTree::do_remove_empty_loop(PhaseIdealLoop *phase) {
// Minimum size must be empty loop
- if (_body.size() > EMPTY_LOOP_SIZE)
+ if (_body.size() > EMPTY_LOOP_SIZE) {
return false;
-
- if (!_head->is_CountedLoop())
- return false; // Dead loop
+ }
+ if (!_head->is_CountedLoop()) {
+ return false; // Dead loop
+ }
CountedLoopNode *cl = _head->as_CountedLoop();
- if (!cl->is_valid_counted_loop())
- return false; // Malformed loop
- if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
- return false; // Infinite loop
-
+ if (!cl->is_valid_counted_loop()) {
+ return false; // Malformed loop
+ }
+ if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) {
+ return false; // Infinite loop
+ }
if (cl->is_pre_loop()) {
- // If the loop we are removing is a pre-loop then the main and
- // post loop can be removed as well
+ // If the loop we are removing is a pre-loop then the main and post loop
+ // can be removed as well.
remove_main_post_loops(cl, phase);
}
@@ -2981,11 +3005,11 @@
for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
Node* n = cl->fast_out(i);
if (n->Opcode() == Op_Phi) {
- assert(iv == NULL, "Too many phis" );
+ assert(iv == NULL, "Too many phis");
iv = n;
}
}
- assert(iv == cl->phi(), "Wrong phi" );
+ assert(iv == cl->phi(), "Wrong phi");
#endif
// main and post loops have explicitly created zero trip guard
@@ -3059,7 +3083,7 @@
}
// Note: the final value after increment should not overflow since
// counted loop has limit check predicate.
- Node *final = new SubINode( exact_limit, cl->stride() );
+ Node *final = new SubINode(exact_limit, cl->stride());
phase->register_new_node(final,cl->in(LoopNode::EntryControl));
phase->_igvn.replace_node(phi,final);
phase->C->set_major_progress();
@@ -3078,7 +3102,7 @@
}
#ifndef PRODUCT
- if(TraceLoopOpts) {
+ if (TraceLoopOpts) {
tty->print("OneIteration ");
this->dump_head();
}
@@ -3099,7 +3123,7 @@
//=============================================================================
//------------------------------iteration_split_impl---------------------------
-bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
+bool IdealLoopTree::iteration_split_impl(PhaseIdealLoop *phase, Node_List &old_new) {
// Compute loop trip count if possible.
compute_trip_count(phase);
@@ -3226,14 +3250,14 @@
phase->do_unroll(this, old_new, true);
}
- // Adjust the pre-loop limits to align the main body
- // iterations.
- if (should_align)
+ // Adjust the pre-loop limits to align the main body iterations.
+ if (should_align) {
Unimplemented();
-
+ }
} else { // Else we have an unchanged counted loop
- if (should_peel) // Might want to peel but do nothing else
+ if (should_peel) { // Might want to peel but do nothing else
phase->do_peeling(this,old_new);
+ }
}
return true;
}
@@ -3241,10 +3265,11 @@
//=============================================================================
//------------------------------iteration_split--------------------------------
-bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
+bool IdealLoopTree::iteration_split(PhaseIdealLoop *phase, Node_List &old_new) {
// Recursively iteration split nested loops
- if (_child && !_child->iteration_split(phase, old_new))
+ if (_child && !_child->iteration_split(phase, old_new)) {
return false;
+ }
// Clean out prior deadwood
DCE_loop_body();
@@ -3270,8 +3295,9 @@
// trip counter when there was no major reshaping.
phase->reorg_offsets(this);
- if (_next && !_next->iteration_split(phase, old_new))
+ if (_next && !_next->iteration_split(phase, old_new)) {
return false;
+ }
return true;
}