/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/chaitin.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f
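
// Summary of the routines below: schedule_early() finds the earliest legal
// block for every node (the deepest block, in the dominator tree, among its
// inputs); schedule_pinned_nodes() places nodes that are pinned by a control
// input into the block of that control; dom_lca() and the raise_LCA_* helpers
// compute legal placement ranges in the dominator tree; and
// insert_anti_dependences() keeps a load above any store that could overwrite
// the memory the load must witness, either by adding a precedence edge or by
// raising the load's LCA.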

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, Add n to b,
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node*  use  = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
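// The lookup below relies on the block-ending projections being stored in the
// last _num_succs slots of the predecessor block's node list, in the same
// order as that block's _succs array: the index of the matching projection in
// that tail selects the successor whose head becomes the new control.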
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}
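
// is_dominator: true if dom_node dominates node. When both nodes map to the
// same basic block, position within the block decides: a block-start node
// dominates everything else in the block, and a block projection (the node
// that ends the block) is dominated by everything else; any other same-block
// combination is unexpected and hits the fatal("unhandled") path.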
bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
  if (dom_node == node) {
    return true;
  }
  Block* d = get_block_for_node(dom_node);
  Block* n = get_block_for_node(node);
  if (d == n) {
    if (dom_node->is_block_start()) {
      return true;
    }
    if (node->is_block_start()) {
      return false;
    }
    if (dom_node->is_block_proj()) {
      return false;
    }
    if (node->is_block_proj()) {
      return true;
    }
#ifdef ASSERT
    node->dump();
    dom_node->dump();
#endif
    fatal("unhandled");
    return false;
  }
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
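// The walk below is an iterative DFS from _root over input (use->def) edges,
// using an explicit stack so deep graphs cannot overflow the native stack;
// each node is handled once, guarded by the 'visited' set. A pinned node is
// placed in the block of the nearest block-start node on its control chain,
// and precedence edges left behind by removed CastPP nodes are folded into
// the node's control edge.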
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        //   (OptimizePtrCompare): the CastPP's control isn't an end of block.
        // - is moved in the branch of a dominating If: the control of
        //   the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
            n = is_dominator(n, m) ? m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
        if (!is_dominator(n, node->in(0))) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ", inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}

//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
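// Returns false when no legal schedule exists: the walk below bails out if it
// reaches an input that was already visited but still has no block, which
// indicates a cycle among nodes that are not yet placed. The traversal is an
// iterative DFS over inputs, driven by the 'roots' worklist and the explicit
// 'nstack' stack.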
bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top(), 0);
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables parent_node and input_index to cache the values
    // on the stack's top.
    Node* parent_node = roots.node();
    uint  input_index = 0;
    roots.pop();

    while (true) {
      if (input_index == 0) {
        // Fixup some control.  Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since its inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in, 0);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find the least common ancestor in the dominator tree.
// LCA is the current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
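// In other words: walk both blocks up to the same dominator-tree depth, then
// walk them up in lockstep until they meet; the meeting block dominates both
// 'this' and the incoming LCA.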
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why does not this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
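// The marks used below are set by insert_anti_dependences(): it calls
// set_raise_LCA_mark(load_index) on the predecessor blocks of anti-dependent
// Phi inputs, and the combined hoisting those marks request is performed here.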
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOfChar),
         "String indexOfChar is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_HasNegatives),
         "HasNegatives is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(), 0, 0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'.  See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx, 0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
// (But, don't bother if LCA is already raised all the way.) |
|
737 |
if (LCA != early) { |
|
738 |
store_block->set_raise_LCA_mark(load_index); |
|
739 |
must_raise_LCA = true; |
|
740 |
non_early_stores.push(store); |
|
741 |
} |
|
742 |
} else { |
|
743 |
// Found a possibly-interfering store in the load's 'early' block. |
|
744 |
// This means 'load' cannot sink at all in the dominator tree. |
|
745 |
// Add an anti-dep edge, and squeeze 'load' into the highest block. |
|
746 |
assert(store != load->in(0), "dependence cycle found"); |
|
747 |
if (verify) { |
|
748 |
assert(store->find_edge(load) != -1, "missing precedence edge"); |
|
749 |
} else { |
|
750 |
store->add_prec(load); |
|
751 |
} |
|
752 |
LCA = early; |
|
753 |
// This turns off the process of gathering non_early_stores. |
|
754 |
} |
|
755 |
} |
|
756 |
// (Worklist is now empty; all nearby stores have been visited.) |
|
757 |
||
758 |
// Finished if 'load' must be scheduled in its 'early' block. |
|
759 |
// If we found any stores there, they have already been given |
|
760 |
// precedence edges. |
|
761 |
if (LCA == early) return LCA; |
|
762 |
||
763 |
// We get here only if there are no possibly-interfering stores |
|
764 |
// in the load's 'early' block. Move LCA up above all predecessors |
|
765 |
// which contain stores we have noted. |
|
766 |
// |
|
767 |
// The raised LCA block can be a home to such interfering stores, |
|
768 |
// but its predecessors must not contain any such stores. |
|
769 |
// |
|
770 |
// The raised LCA will be a lower bound for placing the load, |
|
771 |
// preventing the load from sinking past any block containing |
|
772 |
// a store that may invalidate the memory state required by 'load'. |
|
773 |
if (must_raise_LCA) |
|
19279
4be3c2e6663c
8022284: Hide internal data structure in PhaseCFG
adlertz
parents:
15871
diff
changeset
|
774 |
LCA = raise_LCA_above_marks(LCA, load->_idx, early, this); |
1 | 775 |
if (LCA == early) return LCA; |
776 |
||
777 |
// Insert anti-dependence edges from 'load' to each store |
|
778 |
// in the non-early LCA block. |
|
779 |
// Mine the non_early_stores list for such stores. |
|
780 |
if (LCA->raise_LCA_mark() == load_index) { |
|
781 |
while (non_early_stores.size() > 0) { |
|
782 |
Node* store = non_early_stores.pop(); |
|
19279
4be3c2e6663c
8022284: Hide internal data structure in PhaseCFG
adlertz
parents:
15871
diff
changeset
|
783 |
Block* store_block = get_block_for_node(store); |
1 | 784 |
if (store_block == LCA) { |
785 |
// add anti_dependence from store to load in its own block |
|
786 |
assert(store != load->in(0), "dependence cycle found"); |
|
787 |
if (verify) { |
|
788 |
assert(store->find_edge(load) != -1, "missing precedence edge"); |
|
789 |
} else { |
|
790 |
store->add_prec(load); |
|
791 |
} |
|
792 |
} else { |
|
793 |
assert(store_block->raise_LCA_mark() == load_index, "block was marked"); |
|
794 |
// Any other stores we found must be either inside the new LCA |
|
795 |
// or else outside the original LCA. In the latter case, they |
|
796 |
// did not interfere with any use of 'load'. |
|
797 |
assert(LCA->dominates(store_block) |
|
798 |
|| !LCA_orig->dominates(store_block), "no stray stores"); |
|
799 |
} |
|
800 |
} |
|
801 |
} |
|
802 |
||
803 |
// Return the highest block containing stores; any stores |
|
804 |
// within that block have been given anti-dependence edges. |
|
805 |
return LCA; |
|
806 |
} |
|

// This class is used to iterate backwards over the nodes in the graph.
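// The walk is a post-order traversal of the use graph: a node is normally
// returned only after all of its uses have been returned, which is the order
// compute_latencies_backwards() and schedule_late() below rely on.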

class Node_Backward_Iterator {

  private:
    Node_Backward_Iterator();

  public:
    // Constructor for the iterator
    Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);

    // Postincrement operator to iterate over the nodes
    Node *next();

  private:
    VectorSet   &_visited;
    Node_Stack  &_stack;
    PhaseCFG &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root, root->outcnt());

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next. I iterate backwards to support removal of nodes.
  // The stack holds states consisting of 3 values:
  // current Def node, flag which indicates 1st/2nd pass, index of current out edge
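  // The 1st/2nd-pass flag is packed into bit 0 of the node pointer kept on the
  // stack, so the pointer has to be masked off before use (see the matching
  // push at the bottom of the loop below).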
  Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
  bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
  uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
  _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    while (idx > 0) {
      // For all uses, schedule late
      Node* n = self->raw_out(--idx); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      // 1st pass: No such nodes, 2nd pass: Only such nodes.
      if (n->needs_anti_dependence_check() == iterate_anti_dep) {
        unvisited = n;      // Found unvisited
        break;
      }
    }

    // Did I find an unvisited not-anti-dependent Node?
    if (!unvisited) {
      if (!iterate_anti_dep) {
        // 2nd pass: Iterate over nodes which needs_anti_dependence_check.
        iterate_anti_dep = true;
        idx = self->outcnt();
        continue;
      }
      break;                  // All done with children; post-visit 'self'
    }

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
    self = unvisited;
    iterate_anti_dep = false;
    idx = self->outcnt();
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while (n = iter.next()) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards thru projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->head()->_idx,
      start_latency,
      LCA->get_node(LCA->end_idx())->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
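    // A candidate block is accepted if it is strictly less frequent than the
    // best seen so far, if StressGCM randomly selects it, or if the latency
    // target is not yet covered and the block is no worse than 'delta' times
    // the best frequency while still lying within the node's latency range.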
    if (LCA_freq < least_freq              || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
        (!StressGCM                    &&    // Otherwise, choose with latency
         !in_latency                   &&    // No block containing latency
         LCA_freq < least_freq * delta &&    // No worse frequency
         target >= end_lat             &&    // within latency range
         !self->is_iteratively_computed() )  // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while (self = iter.next()) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in last block in which all their inputs occur.
  visited.Clear();
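  // The visited set and the pre-sized node stack below are shared by
  // schedule_early(), compute_latencies_backwards() and schedule_late(),
  // so the traversal structures are only allocated once.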
  Node_Stack stack(arena, (C->live_nodes() >> 2) + 16); // pre-grow
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    // schedule_late fails only when graph is incorrect.
    assert(!VerifyGraphEdges, "verification should have failed");
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.  Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = NULL;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // Register-pressure-aware scheduling is only enabled when OptoRegScheduling
  // is on and a block above the size threshold was found.
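  // The allocator and liveness structures below are only populated and
  // consulted for register pressure queries during local scheduling; the
  // actual register allocation still happens in a later phase.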
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena;      // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    regalloc.mark_ssa();
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm_live.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size); // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false);    // Collect LRG masks
    live.compute(node_size); // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.Clear();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      _regalloc = NULL;
      return;
    }
  }
  _regalloc = NULL;

  // If we inserted any instructions between a Call and his CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
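  // The latency array was resource-allocated above; poison the pointer so any
  // use after this point fails fast.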
1483 |
_node_latency = (GrowableArray<uint> *)0xdeadbeef; |
|
1 | 1484 |
} |
1485 |
||
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1486 |
bool PhaseCFG::do_global_code_motion() { |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1487 |
|
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1488 |
build_dominator_tree(); |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1489 |
if (C->failing()) { |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1490 |
return false; |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1491 |
} |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1492 |
|
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1493 |
NOT_PRODUCT( C->verify_graph_edges(); ) |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1494 |
|
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1495 |
estimate_block_frequency(); |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1496 |
|
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1497 |
global_code_motion(); |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1498 |
|
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1499 |
if (C->failing()) { |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1500 |
return false; |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1501 |
} |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1502 |
|
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1503 |
return true; |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1504 |
} |
1 | 1505 |
|
1506 |
//------------------------------Estimate_Block_Frequency----------------------- |
|
1507 |
// Estimate block frequencies based on IfNode probabilities. |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1508 |
void PhaseCFG::estimate_block_frequency() { |
1498 | 1509 |
|
1510 |
// Force conditional branches leading to uncommon traps to be unlikely, |
|
1511 |
// not because we get to the uncommon_trap with less relative frequency, |
|
1512 |
// but because an uncommon_trap typically causes a deopt, so we only get |
|
1513 |
// there once. |
|
1514 |
if (C->do_freq_based_layout()) { |
|
1515 |
Block_List worklist; |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1516 |
Block* root_blk = get_block(0); |
1498 | 1517 |
for (uint i = 1; i < root_blk->num_preds(); i++) { |
19279
4be3c2e6663c
8022284: Hide internal data structure in PhaseCFG
adlertz
parents:
15871
diff
changeset
|
1518 |
Block *pb = get_block_for_node(root_blk->pred(i)); |
1498 | 1519 |
if (pb->has_uncommon_code()) { |
1520 |
worklist.push(pb); |
|
1521 |
} |
|
1522 |
} |
|
1523 |
while (worklist.size() > 0) { |
|
1524 |
Block* uct = worklist.pop(); |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1525 |
if (uct == get_root_block()) { |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1526 |
continue; |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1527 |
} |
1498 | 1528 |
for (uint i = 1; i < uct->num_preds(); i++) { |
19279
4be3c2e6663c
8022284: Hide internal data structure in PhaseCFG
adlertz
parents:
15871
diff
changeset
|
1529 |
Block *pb = get_block_for_node(uct->pred(i)); |
1498 | 1530 |
if (pb->_num_succs == 1) { |
1531 |
worklist.push(pb); |
|
1532 |
} else if (pb->num_fall_throughs() == 2) { |
|
1533 |
pb->update_uncommon_branch(uct); |
|
1534 |
} |
|
1535 |
} |
|
1536 |
} |
|
1537 |
} |
|
1 | 1538 |
|
1539 |
// Create the loop tree and calculate loop depth. |
|
1540 |
_root_loop = create_loop_tree(); |
|
1541 |
_root_loop->compute_loop_depth(0); |
|
1542 |
||
1543 |
// Compute block frequency of each block, relative to a single loop entry. |
|
1544 |
_root_loop->compute_freq(); |
|
1545 |
||
1546 |
// Adjust all frequencies to be relative to a single method entry |
|
1498 | 1547 |
_root_loop->_freq = 1.0; |
1 | 1548 |
_root_loop->scale_freq(); |
1549 |
||
2340 | 1550 |
// Save outmost loop frequency for LRG frequency threshold |
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1551 |
_outer_loop_frequency = _root_loop->outer_loop_freq(); |
2340 | 1552 |
|
1 | 1553 |
// force paths ending at uncommon traps to be infrequent |
1498 | 1554 |
if (!C->do_freq_based_layout()) { |
1555 |
Block_List worklist; |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1556 |
Block* root_blk = get_block(0); |
1498 | 1557 |
for (uint i = 1; i < root_blk->num_preds(); i++) { |
19279
4be3c2e6663c
8022284: Hide internal data structure in PhaseCFG
adlertz
parents:
15871
diff
changeset
|
1558 |
Block *pb = get_block_for_node(root_blk->pred(i)); |
1498 | 1559 |
if (pb->has_uncommon_code()) { |
1560 |
worklist.push(pb); |
|
1561 |
} |
|
1 | 1562 |
} |
1498 | 1563 |
while (worklist.size() > 0) { |
1564 |
Block* uct = worklist.pop(); |
|
1565 |
uct->_freq = PROB_MIN; |
|
1566 |
for (uint i = 1; i < uct->num_preds(); i++) { |
|
19279
4be3c2e6663c
8022284: Hide internal data structure in PhaseCFG
adlertz
parents:
15871
diff
changeset
|
1567 |
Block *pb = get_block_for_node(uct->pred(i)); |
1498 | 1568 |
if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) { |
1569 |
worklist.push(pb); |
|
1570 |
} |
|
1 | 1571 |
} |
1572 |
} |
|
1573 |
} |
|
1574 |
||
2016
c1f73fa547fe
6784930: server jvm fails with assert(!n->is_SpillCopy(),"")
kvn
parents:
1498
diff
changeset
|
1575 |
#ifdef ASSERT |
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1576 |
for (uint i = 0; i < number_of_blocks(); i++) { |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1577 |
Block* b = get_block(i); |
2131 | 1578 |
assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency"); |
2016
c1f73fa547fe
6784930: server jvm fails with assert(!n->is_SpillCopy(),"")
kvn
parents:
1498
diff
changeset
|
1579 |
} |
c1f73fa547fe
6784930: server jvm fails with assert(!n->is_SpillCopy(),"")
kvn
parents:
1498
diff
changeset
|
1580 |
#endif |
c1f73fa547fe
6784930: server jvm fails with assert(!n->is_SpillCopy(),"")
kvn
parents:
1498
diff
changeset
|
1581 |
|
1 | 1582 |
#ifndef PRODUCT |
1583 |
if (PrintCFGBlockFreq) { |
|
1584 |
tty->print_cr("CFG Block Frequencies"); |
|
1585 |
_root_loop->dump_tree(); |
|
1586 |
if (Verbose) { |
|
1587 |
tty->print_cr("PhaseCFG dump"); |
|
1588 |
dump(); |
|
1589 |
tty->print_cr("Node dump"); |
|
1590 |
_root->dump(99999); |
|
1591 |
} |
|
1592 |
} |
|
1593 |
#endif |
|
1594 |
} |
|
1595 |
||
1596 |
//----------------------------create_loop_tree-------------------------------- |
|
1597 |
// Create a loop tree from the CFG |
|
1598 |
CFGLoop* PhaseCFG::create_loop_tree() { |
|
1599 |
||
1600 |
#ifdef ASSERT |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1601 |
assert(get_block(0) == get_root_block(), "first block should be root block"); |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1602 |
for (uint i = 0; i < number_of_blocks(); i++) { |
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1603 |
Block* block = get_block(i); |
1 | 1604 |
// Check that _loop field are clear...we could clear them if not. |
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1605 |
assert(block->_loop == NULL, "clear _loop expected"); |
1 | 1606 |
// Sanity check that the RPO numbering is reflected in the _blocks array. |
1607 |
// It doesn't have to be for the loop tree to be built, but if it is not, |
|
1608 |
// then the blocks have been reordered since dom graph building...which |
|
1609 |
// may question the RPO numbering |
|
19330
49d6711171e6
8023003: Cleanup the public interface to PhaseCFG
adlertz
parents:
19279
diff
changeset
|
1610 |
assert(block->_rpo == i, "unexpected reverse post order number"); |
1 | 1611 |
} |
1612 |
#endif |
|

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
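// Add blk's predecessor 'i' to the receiver loop.  A predecessor that is not
// yet in any loop is claimed for this loop and queued so that its own
// predecessors get visited (subject to the RPO filter below); a predecessor
// that already belongs to a different loop causes that loop to be attached as
// a nested child, and the walk continues from the child loop's entry
// predecessor instead.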
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge.  Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the probability of exit in a
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
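    // For example, a loop with two exits whose per-iteration probabilities
    // are 0.05 and 0.15 has exits_sum == 0.20; after normalization the two
    // exits carry 0.25 and 0.75 of the frequency that leaves the loop.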
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
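      // (num_preds() == 2 means exactly one real predecessor -- this block --
      // so all of that successor's frequency arrives along this edge and the
      // ratio _succs[i]->_freq / _freq is the edge probability itself.)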
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly
    return 1.0f/_num_succs;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
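      // (PROB_UNLIKELY_MAG(5) is an "unlikely" probability on the order of
      // 1e-5, so with e.g. four successors the fall-through path still keeps
      // roughly 1 - 4e-5 of the block's frequency.)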
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs----------------------------- |
1902 |
// Return the number of fall-through candidates for a block |
|
1903 |
int Block::num_fall_throughs() { |
|
1904 |
int eidx = end_idx(); |
|
19717
7819ffdaf0ff
8023691: Create interface for nodes in class Block
adlertz
parents:
19330
diff
changeset
|
1905 |
Node *n = get_node(eidx); // Get ending Node |
1498 | 1906 |
|
1907 |
int op = n->Opcode(); |
|
1908 |
if (n->is_Mach()) { |
|
1909 |
if (n->is_MachNullCheck()) { |
|
1910 |
// In theory, either side can fall-thru, for simplicity sake, |
|
1911 |
// let's say only the false branch can now. |
|
1912 |
return 1; |
|
1913 |
} |
|
1914 |
op = n->as_Mach()->ideal_Opcode(); |
|
1915 |
} |
|
1916 |
||
1917 |
// Switch on branch type |
|
1918 |
switch( op ) { |
|
1919 |
case Op_CountedLoopEnd: |
|
1920 |
case Op_If: |
|
1921 |
return 2; |
|
1922 |
||
1923 |
case Op_Root: |
|
1924 |
case Op_Goto: |
|
1925 |
return 1; |
|
1926 |
||
1927 |
case Op_Catch: { |
|
1928 |
for (uint i = 0; i < _num_succs; i++) { |
|
19717
7819ffdaf0ff
8023691: Create interface for nodes in class Block
adlertz
parents:
19330
diff
changeset
|
1929 |
const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); |
1498 | 1930 |
if (ci->_con == CatchProjNode::fall_through_index) { |
1931 |
return 1; |
|
1932 |
} |
|
1933 |
} |
|
1934 |
return 0; |
|
1935 |
} |
|
1936 |
||
1937 |
case Op_Jump: |
|
1938 |
case Op_NeverBranch: |
|
1939 |
case Op_TailCall: |
|
1940 |
case Op_TailJump: |
|
1941 |
case Op_Return: |
|
1942 |
case Op_Halt: |
|
1943 |
case Op_Rethrow: |
|
1944 |
return 0; |
|
1945 |
||
1946 |
default: |
|
1947 |
ShouldNotReachHere(); |
|
1948 |
} |
|
1949 |
||
1950 |
return 0; |
|
1951 |
} |
|
1952 |
||
//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can for now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is the true path, make the probability small, else
  // ub is the false path, and make the probability large
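  // The update below is symmetric: map the probability so that it refers to
  // ub's path, clamp it to PROB_MIN, and map it back.  For example, if ub is
  // the false path of a branch that is currently 0.3 true / 0.7 false, the
  // stored probability becomes 1 - PROB_MIN, i.e. ub's path is made very
  // unlikely.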
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // back branch within the loop
      // Do nothing now, the loop carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // branch is exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // branch into nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
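// A block counts as being inside the nest if its loop sits at the same depth
// as the receiver, or if walking its loop's parent chain up to the receiver's
// depth arrives at the receiver itself.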
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
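// Note: trip_count() is derived from the _exit_prob value saved by
// compute_freq(); it is defined elsewhere (block.hpp) as roughly the
// reciprocal of the exit probability, so a loop whose total per-iteration
// exit probability is 0.25 has its members scaled by about 4.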
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

// Frequency of the outermost (first) child loop, or this loop's own frequency
// if it has no children.
double CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL)   _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
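// One entry is printed per loop-tree node; illustrative shape only (exact
// spacing depends on the format strings below and on the values):
//   Loop: 2 trip_count:      4 freq:   1024
//    members: B12( 1.000) B13( 0.750) L3( 0.250)
//    exits:  ->14@25%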
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif