1
|
1 |
/*
|
|
2 |
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
|
20 |
* CA 95054 USA or visit www.sun.com if you need additional information or
|
|
21 |
* have any questions.
|
|
22 |
*
|
|
23 |
*/
|
|
24 |
|
|
25 |
// Portions of code courtesy of Clifford Click
|
|
26 |
|
|
27 |
// Optimization - Graph Style
|
|
28 |
|
|
29 |
|
|
30 |
class AbstractLockNode;
|
|
31 |
class AddNode;
|
|
32 |
class AddPNode;
|
|
33 |
class AliasInfo;
|
|
34 |
class AllocateArrayNode;
|
|
35 |
class AllocateNode;
|
|
36 |
class Block;
|
|
37 |
class Block_Array;
|
|
38 |
class BoolNode;
|
|
39 |
class BoxLockNode;
|
|
40 |
class CMoveNode;
|
|
41 |
class CallDynamicJavaNode;
|
|
42 |
class CallJavaNode;
|
|
43 |
class CallLeafNode;
|
|
44 |
class CallNode;
|
|
45 |
class CallRuntimeNode;
|
|
46 |
class CallStaticJavaNode;
|
|
47 |
class CatchNode;
|
|
48 |
class CatchProjNode;
|
|
49 |
class CheckCastPPNode;
|
|
50 |
class CmpNode;
|
|
51 |
class CodeBuffer;
|
|
52 |
class ConstraintCastNode;
|
|
53 |
class ConNode;
|
|
54 |
class CountedLoopNode;
|
|
55 |
class CountedLoopEndNode;
|
|
56 |
class FastLockNode;
|
|
57 |
class FastUnlockNode;
|
|
58 |
class IfNode;
|
|
59 |
class InitializeNode;
|
|
60 |
class JVMState;
|
|
61 |
class JumpNode;
|
|
62 |
class JumpProjNode;
|
|
63 |
class LoadNode;
|
|
64 |
class LoadStoreNode;
|
|
65 |
class LockNode;
|
|
66 |
class LoopNode;
|
|
67 |
class MachCallDynamicJavaNode;
|
|
68 |
class MachCallJavaNode;
|
|
69 |
class MachCallLeafNode;
|
|
70 |
class MachCallNode;
|
|
71 |
class MachCallRuntimeNode;
|
|
72 |
class MachCallStaticJavaNode;
|
|
73 |
class MachIfNode;
|
|
74 |
class MachNode;
|
|
75 |
class MachNullCheckNode;
|
|
76 |
class MachReturnNode;
|
|
77 |
class MachSafePointNode;
|
|
78 |
class MachSpillCopyNode;
|
|
79 |
class MachTempNode;
|
|
80 |
class Matcher;
|
|
81 |
class MemBarNode;
|
|
82 |
class MemNode;
|
|
83 |
class MergeMemNode;
|
|
84 |
class MulNode;
|
|
85 |
class MultiNode;
|
|
86 |
class MultiBranchNode;
|
|
87 |
class NeverBranchNode;
|
|
88 |
class Node;
|
|
89 |
class Node_Array;
|
|
90 |
class Node_List;
|
|
91 |
class Node_Stack;
|
|
92 |
class NullCheckNode;
|
|
93 |
class OopMap;
|
206
|
94 |
class ParmNode;
|
1
|
95 |
class PCTableNode;
|
|
96 |
class PhaseCCP;
|
|
97 |
class PhaseGVN;
|
|
98 |
class PhaseIterGVN;
|
|
99 |
class PhaseRegAlloc;
|
|
100 |
class PhaseTransform;
|
|
101 |
class PhaseValues;
|
|
102 |
class PhiNode;
|
|
103 |
class Pipeline;
|
|
104 |
class ProjNode;
|
|
105 |
class RegMask;
|
|
106 |
class RegionNode;
|
|
107 |
class RootNode;
|
|
108 |
class SafePointNode;
|
|
109 |
class StartNode;
|
|
110 |
class State;
|
|
111 |
class StoreNode;
|
|
112 |
class SubNode;
|
|
113 |
class Type;
|
|
114 |
class TypeNode;
|
|
115 |
class UnlockNode;
|
|
116 |
class VectorSet;
|
|
117 |
class IfTrueNode;
|
|
118 |
class IfFalseNode;
|
|
119 |
typedef void (*NFunc)(Node&,void*);
|
|
120 |
extern "C" {
|
|
121 |
typedef int (*C_sort_func_t)(const void *, const void *);
|
|
122 |
}
|
|
123 |
|
|
124 |
// The type of all node counts and indexes.
|
|
125 |
// It must hold at least 16 bits, but must also be fast to load and store.
|
|
126 |
// This type, if less than 32 bits, could limit the number of possible nodes.
|
|
127 |
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
|
|
128 |
typedef unsigned int node_idx_t;
|
|
129 |
|
|
130 |
|
|
131 |
#ifndef OPTO_DU_ITERATOR_ASSERT
|
|
132 |
#ifdef ASSERT
|
|
133 |
#define OPTO_DU_ITERATOR_ASSERT 1
|
|
134 |
#else
|
|
135 |
#define OPTO_DU_ITERATOR_ASSERT 0
|
|
136 |
#endif
|
|
137 |
#endif //OPTO_DU_ITERATOR_ASSERT
|
|
138 |
|
|
139 |
#if OPTO_DU_ITERATOR_ASSERT
|
|
140 |
class DUIterator;
|
|
141 |
class DUIterator_Fast;
|
|
142 |
class DUIterator_Last;
|
|
143 |
#else
|
|
144 |
typedef uint DUIterator;
|
|
145 |
typedef Node** DUIterator_Fast;
|
|
146 |
typedef Node** DUIterator_Last;
|
|
147 |
#endif
|
|
148 |
|
|
149 |
// Node Sentinel
|
|
150 |
#define NodeSentinel (Node*)-1
|
|
151 |
|
|
152 |
// Unknown count frequency
|
|
153 |
#define COUNT_UNKNOWN (-1.0f)
|
|
154 |
|
|
155 |
//------------------------------Node-------------------------------------------
|
|
156 |
// Nodes define actions in the program. They create values, which have types.
|
|
157 |
// They are both vertices in a directed graph and program primitives. Nodes
|
|
158 |
// are labeled; the label is the "opcode", the primitive function in the lambda
|
|
159 |
// calculus sense that gives meaning to the Node. Node inputs are ordered (so
|
|
160 |
// that "a-b" is different from "b-a"). The inputs to a Node are the inputs to
|
|
161 |
// the Node's function. These inputs also define a Type equation for the Node.
|
|
162 |
// Solving these Type equations amounts to doing dataflow analysis.
|
|
163 |
// Control and data are uniformly represented in the graph. Finally, Nodes
|
|
164 |
// have a unique dense integer index which is used to index into side arrays
|
|
165 |
// whenever I have phase-specific information.
|
|
166 |
|
|
167 |
class Node {
|
|
168 |
// Lots of restrictions on cloning Nodes
|
|
169 |
Node(const Node&); // not defined; linker error to use these
|
|
170 |
Node &operator=(const Node &rhs);
|
|
171 |
|
|
172 |
public:
|
|
173 |
friend class Compile;
|
|
174 |
#if OPTO_DU_ITERATOR_ASSERT
|
|
175 |
friend class DUIterator_Common;
|
|
176 |
friend class DUIterator;
|
|
177 |
friend class DUIterator_Fast;
|
|
178 |
friend class DUIterator_Last;
|
|
179 |
#endif
|
|
180 |
|
|
181 |
// Because Nodes come and go, I define an Arena of Node structures to pull
|
|
182 |
// from. This should allow fast access to node creation & deletion. This
|
|
183 |
// field is a local cache of a value defined in some "program fragment" for
|
|
184 |
// which these Nodes are just a part of.
|
|
185 |
|
|
186 |
// New Operator that takes a Compile pointer, this will eventually
|
|
187 |
// be the "new" New operator.
|
|
188 |
// Placement new: allocate the Node's storage out of the Compile's node arena
// (fast bump-pointer allocation; nodes are freed wholesale with the arena).
// NOTE(review): _out is temporarily set to the Compile* here — presumably the
// Node constructor (via Init) retrieves it before installing a real out
// array; confirm against Node::Init.
inline void* operator new( size_t x, Compile* C) {
  Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
  n->_in = (Node**)n;  // magic cookie for assertion check
#endif
  n->_out = (Node**)C; // smuggle the Compile* to the constructor
  return (void*)n;
}
|
|
196 |
|
|
197 |
// New Operator that takes a Compile pointer, this will eventually
|
|
198 |
// be the "new" New operator.
|
|
199 |
// Placement new with an edge count 'y': allocates the Node and its input
// array in one arena block, laying the _in array immediately after the
// Node object itself (at offset x). Used as: new (C,3) FooNode(...).
inline void* operator new( size_t x, Compile* C, int y) {
  Node* n = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*));
  n->_in = (Node**)(((char*)n) + x); // input array lives right after the node
#ifdef ASSERT
  n->_in[y-1] = n; // magic cookie for assertion check
#endif
  n->_out = (Node**)C; // smuggle the Compile* to the constructor
  return (void*)n;
}
|
|
208 |
|
|
209 |
// Delete is a NOP
|
|
210 |
void operator delete( void *ptr ) {}
|
|
211 |
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
|
|
212 |
void destruct();
|
|
213 |
|
|
214 |
// Create a new Node. Required is the number is of inputs required for
|
|
215 |
// semantic correctness.
|
|
216 |
Node( uint required );
|
|
217 |
|
|
218 |
// Create a new Node with given input edges.
|
|
219 |
// This version requires use of the "edge-count" new.
|
|
220 |
// E.g. new (C,3) FooNode( C, NULL, left, right );
|
|
221 |
Node( Node *n0 );
|
|
222 |
Node( Node *n0, Node *n1 );
|
|
223 |
Node( Node *n0, Node *n1, Node *n2 );
|
|
224 |
Node( Node *n0, Node *n1, Node *n2, Node *n3 );
|
|
225 |
Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
|
|
226 |
Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
|
|
227 |
Node( Node *n0, Node *n1, Node *n2, Node *n3,
|
|
228 |
Node *n4, Node *n5, Node *n6 );
|
|
229 |
|
|
230 |
// Clone an inherited Node given only the base Node type.
|
|
231 |
Node* clone() const;
|
|
232 |
|
|
233 |
// Clone a Node, immediately supplying one or two new edges.
|
|
234 |
// The first and second arguments, if non-null, replace in(1) and in(2),
|
|
235 |
// respectively.
|
|
236 |
// Clone this node and immediately replace its first one or two data
// inputs. A NULL argument leaves the corresponding edge of the clone
// untouched; def-use info is maintained by set_req().
Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
  Node* copy = clone();
  if (in1 != NULL) {
    copy->set_req(1, in1);
  }
  if (in2 != NULL) {
    copy->set_req(2, in2);
  }
  return copy;
}
|
|
242 |
|
|
243 |
private:
|
|
244 |
// Shared setup for the above constructors.
|
|
245 |
// Handles all interactions with Compile::current.
|
|
246 |
// Puts initial values in all Node fields except _idx.
|
|
247 |
// Returns the initial value for _idx, which cannot
|
|
248 |
// be initialized by assignment.
|
|
249 |
inline int Init(int req, Compile* C);
|
|
250 |
|
|
251 |
//----------------- input edge handling
|
|
252 |
protected:
|
|
253 |
friend class PhaseCFG; // Access to address of _in array elements
|
|
254 |
Node **_in; // Array of use-def references to Nodes
|
|
255 |
Node **_out; // Array of def-use references to Nodes
|
|
256 |
|
|
257 |
// Input edges are split into two catagories. Required edges are required
|
|
258 |
// for semantic correctness; order is important and NULLs are allowed.
|
|
259 |
// Precedence edges are used to help determine execution order and are
|
|
260 |
// added, e.g., for scheduling purposes. They are unordered and not
|
|
261 |
// duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1
|
|
262 |
// are required, from _cnt to _max-1 are precedence edges.
|
|
263 |
node_idx_t _cnt; // Total number of required Node inputs.
|
|
264 |
|
|
265 |
node_idx_t _max; // Actual length of input array.
|
|
266 |
|
|
267 |
// Output edges are an unordered list of def-use edges which exactly
|
|
268 |
// correspond to required input edges which point from other nodes
|
|
269 |
// to this one. Thus the count of the output edges is the number of
|
|
270 |
// users of this node.
|
|
271 |
node_idx_t _outcnt; // Total number of Node outputs.
|
|
272 |
|
|
273 |
node_idx_t _outmax; // Actual length of output array.
|
|
274 |
|
|
275 |
// Grow the actual input array to the next larger power-of-2 bigger than len.
|
|
276 |
void grow( uint len );
|
|
277 |
// Grow the output array to the next larger power-of-2 bigger than len.
|
|
278 |
void out_grow( uint len );
|
|
279 |
|
|
280 |
public:
|
|
281 |
// Each Node is assigned a unique small/dense number. This number is used
|
|
282 |
// to index into auxiliary arrays of data and bitvectors.
|
|
283 |
// It is declared const to defend against inadvertant assignment,
|
|
284 |
// since it is used by clients as a naked field.
|
|
285 |
const node_idx_t _idx;
|
|
286 |
|
|
287 |
// Get the (read-only) number of input edges
|
|
288 |
uint req() const { return _cnt; }
|
|
289 |
uint len() const { return _max; }
|
|
290 |
// Get the (read-only) number of output edges
|
|
291 |
uint outcnt() const { return _outcnt; }
|
|
292 |
|
|
293 |
#if OPTO_DU_ITERATOR_ASSERT
|
|
294 |
// Iterate over the out-edges of this node. Deletions are illegal.
|
|
295 |
inline DUIterator outs() const;
|
|
296 |
// Use this when the out array might have changed to suppress asserts.
|
|
297 |
inline DUIterator& refresh_out_pos(DUIterator& i) const;
|
|
298 |
// Does the node have an out at this position? (Used for iteration.)
|
|
299 |
inline bool has_out(DUIterator& i) const;
|
|
300 |
inline Node* out(DUIterator& i) const;
|
|
301 |
// Iterate over the out-edges of this node. All changes are illegal.
|
|
302 |
inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
|
|
303 |
inline Node* fast_out(DUIterator_Fast& i) const;
|
|
304 |
// Iterate over the out-edges of this node, deleting one at a time.
|
|
305 |
inline DUIterator_Last last_outs(DUIterator_Last& min) const;
|
|
306 |
inline Node* last_out(DUIterator_Last& i) const;
|
|
307 |
// The inline bodies of all these methods are after the iterator definitions.
|
|
308 |
#else
|
|
309 |
// Iterate over the out-edges of this node. Deletions are illegal.
|
|
310 |
// This iteration uses integral indexes, to decouple from array reallocations.
|
|
311 |
DUIterator outs() const { return 0; }
|
|
312 |
// Use this when the out array might have changed to suppress asserts.
|
|
313 |
DUIterator refresh_out_pos(DUIterator i) const { return i; }
|
|
314 |
|
|
315 |
// Reference to the i'th output Node. Error if out of bounds.
|
|
316 |
Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
|
|
317 |
// Does the node have an out at this position? (Used for iteration.)
|
|
318 |
bool has_out(DUIterator i) const { return i < _outcnt; }
|
|
319 |
|
|
320 |
// Iterate over the out-edges of this node. All changes are illegal.
|
|
321 |
// This iteration uses a pointer internal to the out array.
|
|
322 |
// Begin a fast iteration over the out-edges (no modification allowed
// while iterating). The reference argument receives the limit pointer,
// one past the last out edge; the return value is the starting cursor.
DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
  Node** const base = _out;
  // Hand the caller the end-of-range sentinel:
  max = base + (ptrdiff_t)_outcnt;
  // The iteration starts at the base of the out array:
  return base;
}
|
|
329 |
Node* fast_out(DUIterator_Fast i) const { return *i; }
|
|
330 |
// Iterate over the out-edges of this node, deleting one at a time.
|
|
331 |
// This iteration uses a pointer internal to the out array.
|
|
332 |
// Begin a backward iteration over the out-edges, suitable for deleting
// one edge at a time. The reference argument receives the lower bound
// (the first slot); the return value points at the last live out edge.
DUIterator_Last last_outs(DUIterator_Last& min) const {
  Node** const base = _out;
  // Lower bound of the iteration:
  min = base;
  // Start at the last occupied slot:
  return base + (ptrdiff_t)_outcnt - 1;
}
|
|
339 |
Node* last_out(DUIterator_Last i) const { return *i; }
|
|
340 |
#endif
|
|
341 |
|
|
342 |
// Reference to the i'th input Node. Error if out of bounds.
|
|
343 |
Node* in(uint i) const { assert(i < _max,"oob"); return _in[i]; }
|
|
344 |
// Reference to the i'th output Node. Error if out of bounds.
|
|
345 |
// Use this accessor sparingly. We are going trying to use iterators instead.
|
|
346 |
Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
|
|
347 |
// Return the unique out edge.
|
|
348 |
Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
|
|
349 |
// Delete out edge at position 'i' by moving last out edge to position 'i'
|
|
350 |
// Delete out edge at position 'i' in O(1) by moving the last out edge
// into slot 'i'. The out list is unordered, so this is safe; note that
// it does NOT update the corresponding input edge on the other node.
void raw_del_out(uint i) {
  assert(i < _outcnt,"oob");
  assert(_outcnt > 0,"oob");
#if OPTO_DU_ITERATOR_ASSERT
  // Record that a change happened here, so live DUIterators can assert.
  debug_only(_last_del = _out[i]; ++_del_tick);
#endif
  _out[i] = _out[--_outcnt];
  // Smash the old edge so it can't be used accidentally.
  debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
}
|
|
361 |
|
|
362 |
#ifdef ASSERT
|
|
363 |
bool is_dead() const;
|
|
364 |
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
|
|
365 |
#endif
|
|
366 |
|
|
367 |
// Set a required input edge, also updates corresponding output edge
|
|
368 |
void add_req( Node *n ); // Append a NEW required input
|
|
369 |
void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
|
|
370 |
void del_req( uint idx ); // Delete required edge & compact
|
|
371 |
void ins_req( uint i, Node *n ); // Insert a NEW required input
|
|
372 |
// Set required input edge 'i' to 'n', keeping def-use info consistent:
// the old input (if any) loses this node from its out list, and the new
// input (if non-NULL) gains it. Only valid for required edges (i < _cnt).
void set_req( uint i, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  assert( i < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  Node** p = &_in[i]; // cache this._in, across the del_out call
  if (*p != NULL) (*p)->del_out((Node *)this);
  (*p) = n;
  if (n != NULL) n->add_out((Node *)this);
}
|
|
382 |
// Light version of set_req() to init inputs after node creation.
|
|
383 |
// Light version of set_req() to init inputs after node creation: slot i
// must still be NULL, so there is no old def-use edge to remove.
// The (i == 0 && this == n) escape permits a node to be installed as its
// own control input during construction (cf. is_block_start, which tests
// in(0) == this for Region nodes).
// Fix: parenthesize the mixed &&/|| in the first assert — '&&' already
// binds tighter than '||', so behavior is unchanged, but the explicit
// grouping documents the intent and silences -Wparentheses.
void init_req( uint i, Node *n ) {
  assert( (i == 0 && this == n) ||
          is_not_dead(n), "can not use dead node");
  assert( i < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  assert( _in[i] == NULL, "sanity");
  _in[i] = n;
  if (n != NULL) n->add_out((Node *)this);
}
|
|
393 |
// Find first occurrence of n among my edges:
|
|
394 |
int find_edge(Node* n);
|
|
395 |
int replace_edge(Node* old, Node* neww);
|
|
396 |
// NULL out all inputs to eliminate incoming Def-Use edges.
|
|
397 |
// Return the number of edges between 'n' and 'this'
|
|
398 |
int disconnect_inputs(Node *n);
|
|
399 |
|
|
400 |
// Quickly, return true if and only if I am Compile::current()->top().
|
|
401 |
// Quickly, return true if and only if I am Compile::current()->top().
// TOP is encoded by a NULL _out array; the assert cross-checks that this
// encoding agrees with the compile-wide cached top node (see setup_is_top).
bool is_top() const {
  assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
  return (_out == NULL);
}
|
|
405 |
// Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
|
|
406 |
void setup_is_top();
|
|
407 |
|
|
408 |
// Strip away casting. (It is depth-limited.)
|
|
409 |
Node* uncast() const;
|
|
410 |
|
|
411 |
private:
|
|
412 |
static Node* uncast_helper(const Node* n);
|
|
413 |
|
|
414 |
// Add an output edge to the end of the list
|
|
415 |
// Append 'n' to the def-use (output) list, growing the array if full.
// The TOP node keeps no out edges at all (its _out is the NULL sentinel).
void add_out( Node *n ) {
  if (is_top()) return;
  if (_outcnt == _outmax) {
    out_grow(_outcnt); // grow to next power of 2 above the count
  }
  _out[_outcnt] = n;
  ++_outcnt;
}
|
|
420 |
// Delete an output edge
|
|
421 |
// Delete one occurrence of output edge 'n', scanning the out array
// backwards from the end. In debug builds, asserts if no such edge
// exists. The vacated slot is filled with the last edge, so the out
// list's order is not preserved (it is unordered by design).
void del_out( Node *n ) {
  if (is_top()) return; // TOP keeps no out edges
  Node** outp = &_out[_outcnt];
  // Find and remove n
  do {
    assert(outp > _out, "Missing Def-Use edge");
  } while (*--outp != n);
  *outp = _out[--_outcnt];
  // Smash the old edge so it can't be used accidentally.
  debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  // Record that a change happened here, so live DUIterators can assert.
#if OPTO_DU_ITERATOR_ASSERT
  debug_only(_last_del = n; ++_del_tick);
#endif
}
|
|
436 |
|
|
437 |
public:
|
|
438 |
// Globally replace this node by a given new node, updating all uses.
|
|
439 |
void replace_by(Node* new_node);
|
|
440 |
void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
|
|
441 |
// Find the one non-null required input. RegionNode only
|
|
442 |
Node *nonnull_req() const;
|
|
443 |
// Add or remove precedence edges
|
|
444 |
void add_prec( Node *n );
|
|
445 |
void rm_prec( uint i );
|
|
446 |
// Set precedence edge 'i' to 'n', maintaining def-use info like set_req.
// Precedence edges live in slots [_cnt, _max); slot i must already exist
// — presumably reserved by add_prec; confirm against that method.
void set_prec( uint i, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  assert( i >= _cnt, "not a precedence edge");
  if (_in[i] != NULL) _in[i]->del_out((Node *)this);
  _in[i] = n;
  if (n != NULL) n->add_out((Node *)this);
}
|
|
453 |
// Set this node's index, used by cisc_version to replace current node
|
|
454 |
// Set this node's index, used by cisc_version to replace current node.
// _idx is declared const to defend against casual assignment by clients;
// this is the one sanctioned writer, so it deliberately casts the const
// away. (Legal: the underlying Node object is not itself const.)
void set_idx(uint new_idx) {
  const node_idx_t* ref = &_idx;
  *(node_idx_t*)ref = new_idx;
}
|
|
458 |
// Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
|
|
459 |
// Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
// Def-use info needs no update — the same neighbors remain connected.
// In debug builds, if this node is hash-locked, verify the swap did not
// change its hash code (commutative ops hash independently of operand
// order — NO_HASH is defined elsewhere; confirm against hash()).
void swap_edges(uint i1, uint i2) {
  debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
  // Def-Use info is unchanged
  Node* n1 = in(i1);
  Node* n2 = in(i2);
  _in[i1] = n2;
  _in[i2] = n1;
  // If this node is in the hash table, make sure it doesn't need a rehash.
  assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
}
|
|
469 |
|
|
470 |
// Iterators over input Nodes for a Node X are written as:
|
|
471 |
// for( i = 0; i < X.req(); i++ ) ... X[i] ...
|
|
472 |
// NOTE: Required edges can contain embedded NULL pointers.
|
|
473 |
|
|
474 |
//----------------- Other Node Properties
|
|
475 |
|
|
476 |
// Generate class id for some ideal nodes to avoid virtual query
|
|
477 |
// methods is_<Node>().
|
|
478 |
// Class id is the set of bits corresponded to the node class and all its
|
|
479 |
// super classes so that queries for super classes are also valid.
|
|
480 |
// Subclasses of the same super class have different assigned bit
|
|
481 |
// (the third parameter in the macro DEFINE_CLASS_ID).
|
|
482 |
// Classes with deeper hierarchy are declared first.
|
|
483 |
// Classes with the same hierarchy depth are sorted by usage frequency.
|
|
484 |
//
|
|
485 |
// The query method masks the bits to cut off bits of subclasses
|
|
486 |
// and then compare the result with the class id
|
|
487 |
// (see the macro DEFINE_CLASS_QUERY below).
|
|
488 |
//
|
|
489 |
// Class_MachCall=30, ClassMask_MachCall=31
|
|
490 |
// 12 8 4 0
|
|
491 |
// 0 0 0 0 0 0 0 0 1 1 1 1 0
|
|
492 |
// | | | |
|
|
493 |
// | | | Bit_Mach=2
|
|
494 |
// | | Bit_MachReturn=4
|
|
495 |
// | Bit_MachSafePoint=8
|
|
496 |
// Bit_MachCall=16
|
|
497 |
//
|
|
498 |
// Class_CountedLoop=56, ClassMask_CountedLoop=63
|
|
499 |
// 12 8 4 0
|
|
500 |
// 0 0 0 0 0 0 0 1 1 1 0 0 0
|
|
501 |
// | | |
|
|
502 |
// | | Bit_Region=8
|
|
503 |
// | Bit_Loop=16
|
|
504 |
// Bit_CountedLoop=32
|
|
505 |
|
|
506 |
#define DEFINE_CLASS_ID(cl, supcl, subn) \
|
|
507 |
Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
|
|
508 |
Class_##cl = Class_##supcl + Bit_##cl , \
|
|
509 |
ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
|
|
510 |
|
|
511 |
// This enum is used only for C2 ideal and mach nodes with is_<node>() methods
|
|
512 |
// so that it's values fits into 16 bits.
|
|
513 |
enum NodeClasses {
|
|
514 |
Bit_Node = 0x0000,
|
|
515 |
Class_Node = 0x0000,
|
|
516 |
ClassMask_Node = 0xFFFF,
|
|
517 |
|
|
518 |
DEFINE_CLASS_ID(Multi, Node, 0)
|
|
519 |
DEFINE_CLASS_ID(SafePoint, Multi, 0)
|
|
520 |
DEFINE_CLASS_ID(Call, SafePoint, 0)
|
|
521 |
DEFINE_CLASS_ID(CallJava, Call, 0)
|
|
522 |
DEFINE_CLASS_ID(CallStaticJava, CallJava, 0)
|
|
523 |
DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1)
|
|
524 |
DEFINE_CLASS_ID(CallRuntime, Call, 1)
|
|
525 |
DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0)
|
|
526 |
DEFINE_CLASS_ID(Allocate, Call, 2)
|
|
527 |
DEFINE_CLASS_ID(AllocateArray, Allocate, 0)
|
|
528 |
DEFINE_CLASS_ID(AbstractLock, Call, 3)
|
|
529 |
DEFINE_CLASS_ID(Lock, AbstractLock, 0)
|
|
530 |
DEFINE_CLASS_ID(Unlock, AbstractLock, 1)
|
|
531 |
DEFINE_CLASS_ID(MultiBranch, Multi, 1)
|
|
532 |
DEFINE_CLASS_ID(PCTable, MultiBranch, 0)
|
|
533 |
DEFINE_CLASS_ID(Catch, PCTable, 0)
|
|
534 |
DEFINE_CLASS_ID(Jump, PCTable, 1)
|
|
535 |
DEFINE_CLASS_ID(If, MultiBranch, 1)
|
|
536 |
DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
|
|
537 |
DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
|
|
538 |
DEFINE_CLASS_ID(Start, Multi, 2)
|
|
539 |
DEFINE_CLASS_ID(MemBar, Multi, 3)
|
|
540 |
DEFINE_CLASS_ID(Initialize, MemBar, 0)
|
|
541 |
|
|
542 |
DEFINE_CLASS_ID(Mach, Node, 1)
|
|
543 |
DEFINE_CLASS_ID(MachReturn, Mach, 0)
|
|
544 |
DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
|
|
545 |
DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
|
|
546 |
DEFINE_CLASS_ID(MachCallJava, MachCall, 0)
|
|
547 |
DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0)
|
|
548 |
DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
|
|
549 |
DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
|
|
550 |
DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
|
|
551 |
DEFINE_CLASS_ID(MachSpillCopy, Mach, 1)
|
|
552 |
DEFINE_CLASS_ID(MachNullCheck, Mach, 2)
|
|
553 |
DEFINE_CLASS_ID(MachIf, Mach, 3)
|
|
554 |
DEFINE_CLASS_ID(MachTemp, Mach, 4)
|
|
555 |
|
|
556 |
DEFINE_CLASS_ID(Proj, Node, 2)
|
|
557 |
DEFINE_CLASS_ID(CatchProj, Proj, 0)
|
|
558 |
DEFINE_CLASS_ID(JumpProj, Proj, 1)
|
|
559 |
DEFINE_CLASS_ID(IfTrue, Proj, 2)
|
|
560 |
DEFINE_CLASS_ID(IfFalse, Proj, 3)
|
206
|
561 |
DEFINE_CLASS_ID(Parm, Proj, 4)
|
1
|
562 |
|
|
563 |
DEFINE_CLASS_ID(Region, Node, 3)
|
|
564 |
DEFINE_CLASS_ID(Loop, Region, 0)
|
|
565 |
DEFINE_CLASS_ID(Root, Loop, 0)
|
|
566 |
DEFINE_CLASS_ID(CountedLoop, Loop, 1)
|
|
567 |
|
|
568 |
DEFINE_CLASS_ID(Sub, Node, 4)
|
|
569 |
DEFINE_CLASS_ID(Cmp, Sub, 0)
|
|
570 |
DEFINE_CLASS_ID(FastLock, Cmp, 0)
|
|
571 |
DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
|
|
572 |
|
|
573 |
DEFINE_CLASS_ID(Type, Node, 5)
|
|
574 |
DEFINE_CLASS_ID(Phi, Type, 0)
|
|
575 |
DEFINE_CLASS_ID(ConstraintCast, Type, 1)
|
|
576 |
DEFINE_CLASS_ID(CheckCastPP, Type, 2)
|
|
577 |
DEFINE_CLASS_ID(CMove, Type, 3)
|
|
578 |
|
|
579 |
DEFINE_CLASS_ID(Mem, Node, 6)
|
|
580 |
DEFINE_CLASS_ID(Load, Mem, 0)
|
|
581 |
DEFINE_CLASS_ID(Store, Mem, 1)
|
|
582 |
DEFINE_CLASS_ID(LoadStore, Mem, 2)
|
|
583 |
|
|
584 |
DEFINE_CLASS_ID(MergeMem, Node, 7)
|
|
585 |
DEFINE_CLASS_ID(Bool, Node, 8)
|
|
586 |
DEFINE_CLASS_ID(AddP, Node, 9)
|
|
587 |
DEFINE_CLASS_ID(BoxLock, Node, 10)
|
|
588 |
DEFINE_CLASS_ID(Add, Node, 11)
|
|
589 |
DEFINE_CLASS_ID(Mul, Node, 12)
|
|
590 |
|
|
591 |
_max_classes = ClassMask_Mul
|
|
592 |
};
|
|
593 |
#undef DEFINE_CLASS_ID
|
|
594 |
|
|
595 |
// Flags are sorted by usage frequency.
|
|
596 |
enum NodeFlags {
|
|
597 |
Flag_is_Copy = 0x01, // should be first bit to avoid shift
|
|
598 |
Flag_is_Call = Flag_is_Copy << 1,
|
|
599 |
Flag_rematerialize = Flag_is_Call << 1,
|
|
600 |
Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
|
|
601 |
Flag_is_macro = Flag_needs_anti_dependence_check << 1,
|
|
602 |
Flag_is_Con = Flag_is_macro << 1,
|
|
603 |
Flag_is_cisc_alternate = Flag_is_Con << 1,
|
|
604 |
Flag_is_Branch = Flag_is_cisc_alternate << 1,
|
|
605 |
Flag_is_block_start = Flag_is_Branch << 1,
|
|
606 |
Flag_is_Goto = Flag_is_block_start << 1,
|
|
607 |
Flag_is_dead_loop_safe = Flag_is_Goto << 1,
|
|
608 |
Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
|
|
609 |
Flag_is_safepoint_node = Flag_may_be_short_branch << 1,
|
|
610 |
Flag_is_pc_relative = Flag_is_safepoint_node << 1,
|
|
611 |
Flag_is_Vector = Flag_is_pc_relative << 1,
|
|
612 |
_max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination
|
|
613 |
};
|
|
614 |
|
|
615 |
private:
|
|
616 |
jushort _class_id;
|
|
617 |
jushort _flags;
|
|
618 |
|
|
619 |
protected:
|
|
620 |
// These methods should be called from constructors only.
|
|
621 |
// Record the class id (constructor-time only). Plain assignment, not OR:
// a Class_##cl value already embeds all of its superclass bits, so each
// subclass constructor fully replaces the id set by its parent.
void init_class_id(jushort c) {
  assert(c <= _max_classes, "invalid node class");
  _class_id = c; // subclass id embeds all superclass bits (DEFINE_CLASS_ID)
}
|
|
625 |
// OR additional flag bits into _flags (constructor-time only). Unlike
// init_class_id this accumulates, so each constructor in the chain can
// contribute its own flags.
void init_flags(jushort fl) {
  assert(fl <= _max_flags, "invalid node flag");
  _flags |= fl;
}
|
|
629 |
// Clear the given flag bits from _flags (constructor-time only).
void clear_flag(jushort fl) {
  assert(fl <= _max_flags, "invalid node flag");
  _flags &= ~fl;
}
|
|
633 |
|
|
634 |
public:
|
|
635 |
const jushort class_id() const { return _class_id; }
|
|
636 |
|
|
637 |
const jushort flags() const { return _flags; }
|
|
638 |
|
|
639 |
// Return a dense integer opcode number
|
|
640 |
virtual int Opcode() const;
|
|
641 |
|
|
642 |
// Virtual inherited Node size
|
|
643 |
virtual uint size_of() const;
|
|
644 |
|
|
645 |
// Other interesting Node properties
|
|
646 |
|
|
647 |
// Special case: is_Call() returns true for both CallNode and MachCallNode.
|
|
648 |
bool is_Call() const {
|
|
649 |
return (_flags & Flag_is_Call) != 0;
|
|
650 |
}
|
|
651 |
|
|
652 |
CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
|
|
653 |
assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
|
|
654 |
return (CallNode*)this;
|
|
655 |
}
|
|
656 |
|
|
657 |
#define DEFINE_CLASS_QUERY(type) \
|
|
658 |
bool is_##type() const { \
|
|
659 |
return ((_class_id & ClassMask_##type) == Class_##type); \
|
|
660 |
} \
|
|
661 |
type##Node *as_##type() const { \
|
|
662 |
assert(is_##type(), "invalid node class"); \
|
|
663 |
return (type##Node*)this; \
|
|
664 |
}
|
|
665 |
|
|
666 |
DEFINE_CLASS_QUERY(AbstractLock)
|
|
667 |
DEFINE_CLASS_QUERY(Add)
|
|
668 |
DEFINE_CLASS_QUERY(AddP)
|
|
669 |
DEFINE_CLASS_QUERY(Allocate)
|
|
670 |
DEFINE_CLASS_QUERY(AllocateArray)
|
|
671 |
DEFINE_CLASS_QUERY(Bool)
|
|
672 |
DEFINE_CLASS_QUERY(BoxLock)
|
|
673 |
DEFINE_CLASS_QUERY(CallDynamicJava)
|
|
674 |
DEFINE_CLASS_QUERY(CallJava)
|
|
675 |
DEFINE_CLASS_QUERY(CallLeaf)
|
|
676 |
DEFINE_CLASS_QUERY(CallRuntime)
|
|
677 |
DEFINE_CLASS_QUERY(CallStaticJava)
|
|
678 |
DEFINE_CLASS_QUERY(Catch)
|
|
679 |
DEFINE_CLASS_QUERY(CatchProj)
|
|
680 |
DEFINE_CLASS_QUERY(CheckCastPP)
|
|
681 |
DEFINE_CLASS_QUERY(ConstraintCast)
|
|
682 |
DEFINE_CLASS_QUERY(CMove)
|
|
683 |
DEFINE_CLASS_QUERY(Cmp)
|
|
684 |
DEFINE_CLASS_QUERY(CountedLoop)
|
|
685 |
DEFINE_CLASS_QUERY(CountedLoopEnd)
|
|
686 |
DEFINE_CLASS_QUERY(FastLock)
|
|
687 |
DEFINE_CLASS_QUERY(FastUnlock)
|
|
688 |
DEFINE_CLASS_QUERY(If)
|
|
689 |
DEFINE_CLASS_QUERY(IfFalse)
|
|
690 |
DEFINE_CLASS_QUERY(IfTrue)
|
|
691 |
DEFINE_CLASS_QUERY(Initialize)
|
|
692 |
DEFINE_CLASS_QUERY(Jump)
|
|
693 |
DEFINE_CLASS_QUERY(JumpProj)
|
|
694 |
DEFINE_CLASS_QUERY(Load)
|
|
695 |
DEFINE_CLASS_QUERY(LoadStore)
|
|
696 |
DEFINE_CLASS_QUERY(Lock)
|
|
697 |
DEFINE_CLASS_QUERY(Loop)
|
|
698 |
DEFINE_CLASS_QUERY(Mach)
|
|
699 |
DEFINE_CLASS_QUERY(MachCall)
|
|
700 |
DEFINE_CLASS_QUERY(MachCallDynamicJava)
|
|
701 |
DEFINE_CLASS_QUERY(MachCallJava)
|
|
702 |
DEFINE_CLASS_QUERY(MachCallLeaf)
|
|
703 |
DEFINE_CLASS_QUERY(MachCallRuntime)
|
|
704 |
DEFINE_CLASS_QUERY(MachCallStaticJava)
|
|
705 |
DEFINE_CLASS_QUERY(MachIf)
|
|
706 |
DEFINE_CLASS_QUERY(MachNullCheck)
|
|
707 |
DEFINE_CLASS_QUERY(MachReturn)
|
|
708 |
DEFINE_CLASS_QUERY(MachSafePoint)
|
|
709 |
DEFINE_CLASS_QUERY(MachSpillCopy)
|
|
710 |
DEFINE_CLASS_QUERY(MachTemp)
|
|
711 |
DEFINE_CLASS_QUERY(Mem)
|
|
712 |
DEFINE_CLASS_QUERY(MemBar)
|
|
713 |
DEFINE_CLASS_QUERY(MergeMem)
|
|
714 |
DEFINE_CLASS_QUERY(Mul)
|
|
715 |
DEFINE_CLASS_QUERY(Multi)
|
|
716 |
DEFINE_CLASS_QUERY(MultiBranch)
|
206
|
717 |
DEFINE_CLASS_QUERY(Parm)
|
1
|
718 |
DEFINE_CLASS_QUERY(PCTable)
|
|
719 |
DEFINE_CLASS_QUERY(Phi)
|
|
720 |
DEFINE_CLASS_QUERY(Proj)
|
|
721 |
DEFINE_CLASS_QUERY(Region)
|
|
722 |
DEFINE_CLASS_QUERY(Root)
|
|
723 |
DEFINE_CLASS_QUERY(SafePoint)
|
|
724 |
DEFINE_CLASS_QUERY(Start)
|
|
725 |
DEFINE_CLASS_QUERY(Store)
|
|
726 |
DEFINE_CLASS_QUERY(Sub)
|
|
727 |
DEFINE_CLASS_QUERY(Type)
|
|
728 |
DEFINE_CLASS_QUERY(Unlock)
|
|
729 |
|
|
730 |
#undef DEFINE_CLASS_QUERY
|
|
731 |
|
|
732 |
// duplicate of is_MachSpillCopy()
|
|
733 |
bool is_SpillCopy () const {
|
|
734 |
return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
|
|
735 |
}
|
|
736 |
|
|
737 |
bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
|
|
738 |
bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; }
|
|
739 |
// The data node which is safe to leave in dead loop during IGVN optimization.
|
|
740 |
bool is_dead_loop_safe() const {
|
|
741 |
return is_Phi() || is_Proj() ||
|
|
742 |
(_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0;
|
|
743 |
}
|
|
744 |
|
|
745 |
// is_Copy() returns copied edge index (0 or 1)
|
|
746 |
uint is_Copy() const { return (_flags & Flag_is_Copy); }
|
|
747 |
|
|
748 |
virtual bool is_CFG() const { return false; }
|
|
749 |
|
|
750 |
// If this node is control-dependent on a test, can it be
|
|
751 |
// rerouted to a dominating equivalent test? This is usually
|
|
752 |
// true of non-CFG nodes, but can be false for operations which
|
|
753 |
// depend for their correct sequencing on more than one test.
|
|
754 |
// (In that case, hoisting to a dominating test may silently
|
|
755 |
// skip some other important test.)
|
|
756 |
virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };
|
|
757 |
|
|
758 |
// defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd'
|
|
759 |
bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; }
|
|
760 |
|
|
761 |
// When building basic blocks, I need to have a notion of block beginning
|
|
762 |
// Nodes, next block selector Nodes (block enders), and next block
|
|
763 |
// projections. These calls need to work on their machine equivalents. The
|
|
764 |
// Ideal beginning Nodes are RootNode, RegionNode and StartNode.
|
|
765 |
// True if this node starts a basic block.
// A Region starts a block only when it is its own control input
// (in(0) == this); presumably this distinguishes a block-head Region
// from one spliced mid-block -- confirm against block-building code.
// All other nodes rely on the Flag_is_block_start flag bit.
bool is_block_start() const {
  if ( is_Region() )
    return this == (const Node*)in(0);
  else
    return (_flags & Flag_is_block_start) != 0;
}
|
|
771 |
|
|
772 |
// The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
|
|
773 |
// Goto and Return. This call also returns the block ending Node.
|
|
774 |
virtual const Node *is_block_proj() const;
|
|
775 |
|
|
776 |
// The node is a "macro" node which needs to be expanded before matching
|
|
777 |
bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
|
|
778 |
|
|
779 |
// Value is a vector of primitive values
|
|
780 |
bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; }
|
|
781 |
|
|
782 |
//----------------- Optimization
|
|
783 |
|
|
784 |
// Get the worst-case Type output for this Node.
|
|
785 |
virtual const class Type *bottom_type() const;
|
|
786 |
|
|
787 |
// If we find a better type for a node, try to record it permanently.
|
|
788 |
// Return true if this node actually changed.
|
|
789 |
// Be sure to do the hash_delete game in the "rehash" variant.
|
|
790 |
void raise_bottom_type(const Type* new_type);
|
|
791 |
|
|
792 |
// Get the address type with which this node uses and/or defs memory,
|
|
793 |
// or NULL if none. The address type is conservatively wide.
|
|
794 |
// Returns non-null for calls, membars, loads, stores, etc.
|
|
795 |
// Returns TypePtr::BOTTOM if the node touches memory "broadly".
|
|
796 |
virtual const class TypePtr *adr_type() const { return NULL; }
|
|
797 |
|
|
798 |
// Return an existing node which computes the same function as this node.
|
|
799 |
// The optimistic combined algorithm requires this to return a Node which
|
|
800 |
// is a small number of steps away (e.g., one of my inputs).
|
|
801 |
virtual Node *Identity( PhaseTransform *phase );
|
|
802 |
|
|
803 |
// Return the set of values this Node can take on at runtime.
|
|
804 |
virtual const Type *Value( PhaseTransform *phase ) const;
|
|
805 |
|
|
806 |
// Return a node which is more "ideal" than the current node.
|
|
807 |
// The invariants on this call are subtle. If in doubt, read the
|
|
808 |
// treatise in node.cpp above the default implemention AND TEST WITH
|
|
809 |
// +VerifyIterativeGVN!
|
|
810 |
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
|
|
811 |
|
|
812 |
// Some nodes have specific Ideal subgraph transformations only if they are
|
|
813 |
// unique users of specific nodes. Such nodes should be put on IGVN worklist
|
|
814 |
// for the transformations to happen.
|
|
815 |
bool has_special_unique_user() const;
|
|
816 |
|
|
817 |
protected:
|
|
818 |
bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
|
|
819 |
public:
|
|
820 |
|
|
821 |
// Idealize graph, using DU info. Done after constant propagation
|
|
822 |
virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
|
|
823 |
|
|
824 |
// See if there is valid pipeline info
|
|
825 |
static const Pipeline *pipeline_class();
|
|
826 |
virtual const Pipeline *pipeline() const;
|
|
827 |
|
|
828 |
// Compute the latency from the def to this instruction of the ith input node
|
|
829 |
uint latency(uint i);
|
|
830 |
|
|
831 |
// Hash & compare functions, for pessimistic value numbering
|
|
832 |
|
|
833 |
// If the hash function returns the special sentinel value NO_HASH,
|
|
834 |
// the node is guaranteed never to compare equal to any other node.
|
|
835 |
// If we accidently generate a hash with value NO_HASH the node
|
|
836 |
// won't go into the table and we'll lose a little optimization.
|
|
837 |
enum { NO_HASH = 0 };
|
|
838 |
virtual uint hash() const;
|
|
839 |
virtual uint cmp( const Node &n ) const;
|
|
840 |
|
|
841 |
// Operation appears to be iteratively computed (such as an induction variable)
|
|
842 |
// It is possible for this operation to return false for a loop-varying
|
|
843 |
// value, if it appears (by local graph inspection) to be computed by a simple conditional.
|
|
844 |
bool is_iteratively_computed();
|
|
845 |
|
|
846 |
// Determine if a node is Counted loop induction variable.
|
|
847 |
// The method is defined in loopnode.cpp.
|
|
848 |
const Node* is_loop_iv() const;
|
|
849 |
|
|
850 |
// Return a node with opcode "opc" and same inputs as "this" if one can
|
|
851 |
// be found; Otherwise return NULL;
|
|
852 |
Node* find_similar(int opc);
|
|
853 |
|
|
854 |
// Return the unique control out if only one. Null if none or more than one.
|
|
855 |
Node* unique_ctrl_out();
|
|
856 |
|
|
857 |
//----------------- Code Generation
|
|
858 |
|
|
859 |
// Ideal register class for Matching. Zero means unmatched instruction
|
|
860 |
// (these are cloned instead of converted to machine nodes).
|
|
861 |
virtual uint ideal_reg() const;
|
|
862 |
|
|
863 |
static const uint NotAMachineReg; // must be > max. machine register
|
|
864 |
|
|
865 |
// Do we Match on this edge index or not? Generally false for Control
|
|
866 |
// and true for everything else. Weird for calls & returns.
|
|
867 |
virtual uint match_edge(uint idx) const;
|
|
868 |
|
|
869 |
// Register class output is returned in
|
|
870 |
virtual const RegMask &out_RegMask() const;
|
|
871 |
// Register class input is expected in
|
|
872 |
virtual const RegMask &in_RegMask(uint) const;
|
|
873 |
// Should we clone rather than spill this instruction?
|
|
874 |
bool rematerialize() const;
|
|
875 |
|
|
876 |
// Return JVM State Object if this Node carries debug info, or NULL otherwise
|
|
877 |
virtual JVMState* jvms() const;
|
|
878 |
|
|
879 |
// Print as assembly
|
|
880 |
virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
|
|
881 |
// Emit bytes starting at parameter 'ptr'
|
|
882 |
// Bump 'ptr' by the number of output bytes
|
|
883 |
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
|
|
884 |
// Size of instruction in bytes
|
|
885 |
virtual uint size(PhaseRegAlloc *ra_) const;
|
|
886 |
|
|
887 |
// Convenience function to extract an integer constant from a node.
|
|
888 |
// If it is not an integer constant (either Con, CastII, or Mach),
|
|
889 |
// return value_if_unknown.
|
|
890 |
// Extract an integer constant from this node, or return
// value_if_unknown when the node's type is not a constant int
// (find_int_type() returns NULL or a non-constant TypeInt).
jint find_int_con(jint value_if_unknown) const {
  const TypeInt* t = find_int_type();
  return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
}
|
|
894 |
// Return the constant, knowing it is an integer constant already
|
|
895 |
// Return the integer constant this node is already known to carry.
// guarantee() (not assert) fires even in product builds if the node
// turns out not to have an int type -- callers must know it is a con.
jint get_int() const {
  const TypeInt* t = find_int_type();
  guarantee(t != NULL, "must be con");
  return t->get_con();
}
|
|
900 |
// Here's where the work is done. Can produce non-constant int types too.
|
|
901 |
const TypeInt* find_int_type() const;
|
|
902 |
|
|
903 |
// Same thing for long (and intptr_t, via type.hpp):
|
|
904 |
// Long flavor of get_int(): return the jlong constant this node is
// already known to carry; guarantee()s that a TypeLong is present.
jlong get_long() const {
  const TypeLong* t = find_long_type();
  guarantee(t != NULL, "must be con");
  return t->get_con();
}
|
|
909 |
// Extract a long constant from this node, or return value_if_unknown
// when the node's type is not a constant long.
// NOTE: the fallback parameter is jlong (it was previously declared
// jint, which silently truncated any 64-bit default passed by the
// caller before returning it through the jlong result).  Existing
// callers passing a jint still compile via implicit widening.
jlong find_long_con(jlong value_if_unknown) const {
  const TypeLong* t = find_long_type();
  return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
}
|
|
913 |
const TypeLong* find_long_type() const;
|
|
914 |
|
|
915 |
// These guys are called by code generated by ADLC:
|
|
916 |
intptr_t get_ptr() const;
|
|
917 |
jdouble getd() const;
|
|
918 |
jfloat getf() const;
|
|
919 |
|
|
920 |
// Nodes which are pinned into basic blocks
|
|
921 |
virtual bool pinned() const { return false; }
|
|
922 |
|
|
923 |
// Nodes which use memory without consuming it, hence need antidependences
|
|
924 |
// More specifically, needs_anti_dependence_check returns true iff the node
|
|
925 |
// (a) does a load, and (b) does not perform a store (except perhaps to a
|
|
926 |
// stack slot or some other unaliased location).
|
|
927 |
bool needs_anti_dependence_check() const;
|
|
928 |
|
|
929 |
// Return which operand this instruction may cisc-spill. In other words,
|
|
930 |
// return operand position that can convert from reg to memory access
|
|
931 |
virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
|
|
932 |
bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }
|
|
933 |
|
|
934 |
//----------------- Graph walking
|
|
935 |
public:
|
|
936 |
// Walk and apply member functions recursively.
|
|
937 |
// Supplied (this) pointer is root.
|
|
938 |
void walk(NFunc pre, NFunc post, void *env);
|
|
939 |
static void nop(Node &, void*); // Dummy empty function
|
|
940 |
static void packregion( Node &n, void* );
|
|
941 |
private:
|
|
942 |
void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);
|
|
943 |
|
|
944 |
//----------------- Printing, etc
|
|
945 |
public:
|
|
946 |
#ifndef PRODUCT
|
|
947 |
Node* find(int idx) const; // Search the graph for the given idx.
|
|
948 |
Node* find_ctrl(int idx) const; // Search control ancestors for the given idx.
|
|
949 |
void dump() const; // Print this node,
|
|
950 |
void dump(int depth) const; // Print this node, recursively to depth d
|
|
951 |
void dump_ctrl(int depth) const; // Print control nodes, to depth d
|
|
952 |
virtual void dump_req() const; // Print required-edge info
|
|
953 |
virtual void dump_prec() const; // Print precedence-edge info
|
|
954 |
virtual void dump_out() const; // Print the output edge info
|
|
955 |
virtual void dump_spec(outputStream *st) const {}; // Print per-node info
|
|
956 |
void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
|
|
957 |
void verify() const; // Check Def-Use info for my subgraph
|
|
958 |
static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);
|
|
959 |
|
|
960 |
// This call defines a class-unique string used to identify class instances
|
|
961 |
virtual const char *Name() const;
|
|
962 |
|
|
963 |
void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
|
|
964 |
// RegMask Print Functions
|
|
965 |
void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
|
|
966 |
void dump_out_regmask() { out_RegMask().dump(); }
|
|
967 |
static int _in_dump_cnt;
|
|
968 |
static bool in_dump() { return _in_dump_cnt > 0; }
|
|
969 |
// Cheap one-line dump: node index, Name(), then the _idx of every
// input edge ("NULL" for empty slots).  len() covers required plus
// precedence edges.  Debug aid only (PRODUCT-excluded section).
void fast_dump() const {
  tty->print("%4d: %-17s", _idx, Name());
  for (uint i = 0; i < len(); i++)
    if (in(i))
      tty->print(" %4d", in(i)->_idx);
    else
      tty->print(" NULL");
  tty->print("\n");
}
|
|
978 |
#endif
|
|
979 |
#ifdef ASSERT
|
|
980 |
void verify_construction();
|
|
981 |
bool verify_jvms(const JVMState* jvms) const;
|
|
982 |
int _debug_idx; // Unique value assigned to every node.
|
|
983 |
int debug_idx() const { return _debug_idx; }
|
|
984 |
void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }
|
|
985 |
|
|
986 |
Node* _debug_orig; // Original version of this, if any.
|
|
987 |
Node* debug_orig() const { return _debug_orig; }
|
|
988 |
void set_debug_orig(Node* orig); // _debug_orig = orig
|
|
989 |
|
|
990 |
int _hash_lock; // Barrier to modifications of nodes in the hash table
|
|
991 |
void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
|
|
992 |
void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }
|
|
993 |
|
|
994 |
static void init_NodeProperty();
|
|
995 |
|
|
996 |
#if OPTO_DU_ITERATOR_ASSERT
|
|
997 |
const Node* _last_del; // The last deleted node.
|
|
998 |
uint _del_tick; // Bumped when a deletion happens..
|
|
999 |
#endif
|
|
1000 |
#endif
|
|
1001 |
};
|
|
1002 |
|
|
1003 |
//-----------------------------------------------------------------------------
|
|
1004 |
// Iterators over DU info, and associated Node functions.
|
|
1005 |
|
|
1006 |
#if OPTO_DU_ITERATOR_ASSERT
|
|
1007 |
|
|
1008 |
// Common code for assertion checking on DU iterators.
|
|
1009 |
// Base class holding the ASSERT-only bookkeeping shared by all DU
// (def-use) iterators.  In product builds the class is empty and the
// I_VDUI_ONLY macro expands to nothing, so iterators compile down to
// a bare index or pointer.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

  // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};
|
|
1029 |
|
|
1030 |
#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)
|
|
1031 |
|
|
1032 |
// Default DU iterator. Allows appends onto the out array.
|
|
1033 |
// Allows deletion from the out array only at the current point.
|
|
1034 |
// Usage:
|
|
1035 |
// for (DUIterator i = x->outs(); x->has_out(i); i++) {
|
|
1036 |
// Node* y = x->out(i);
|
|
1037 |
// ...
|
|
1038 |
// }
|
|
1039 |
// Compiles in product mode to a unsigned integer index, which indexes
|
|
1040 |
// onto a repeatedly reloaded base pointer of x->_out. The loop predicate
|
|
1041 |
// also reloads x->_outcnt. If you delete, you must perform "--i" just
|
|
1042 |
// before continuing the loop. You must delete only the last-produced
|
|
1043 |
// edge. You must delete only a single copy of the last-produced edge,
|
|
1044 |
// or else you must delete all copies at once (the first time the edge
|
|
1045 |
// is produced by the iterator).
|
|
1046 |
// Default DU iterator (see usage comment above).  Product-mode state is
// just the uint _idx; everything else is ASSERT-only verification.
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;          // Records the refresh activity.

  void sample(const Node* node);       // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();             // Verify an increment operation.
  void verify_resync();                // Verify that we can back up over a deletion.
  void verify_finish();                // Verify that the loop terminated properly.
  void refresh();                      // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  // Private: constructed only by Node::outs(); the int parameter exists
  // solely to keep this from acting as an implicit conversion.
  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  // Back up over a deletion; verify_resync runs BEFORE the decrement.
  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  // Destructor checks the loop was terminated properly (ASSERT only).
  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};
|
|
1087 |
|
|
1088 |
// Begin iteration over this node's uses (out edges).
DUIterator Node::outs() const
  { return DUIterator(this, 0); }
// Resample verification state after the _out array may have changed
// (no-op in product builds); returns i for chaining.
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());            return i; }
// Loop predicate: reloads _outcnt each call, so appends during
// iteration are seen.
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
// Current use; in ASSERT builds also records it as i._last so only
// the last-produced edge may be deleted.
Node* Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }
|
|
1096 |
|
|
1097 |
|
|
1098 |
// Faster DU iterator. Disallows insertions into the out array.
|
|
1099 |
// Allows deletion from the out array only at the current point.
|
|
1100 |
// Usage:
|
|
1101 |
// for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
|
|
1102 |
// Node* y = x->fast_out(i);
|
|
1103 |
// ...
|
|
1104 |
// }
|
|
1105 |
// Compiles in product mode to raw Node** pointer arithmetic, with
|
|
1106 |
// no reloading of pointers from the original node x. If you delete,
|
|
1107 |
// you must perform "--i; --imax" just before continuing the loop.
|
|
1108 |
// If you delete multiple copies of the same edge, you must decrement
|
|
1109 |
// imax, but not i, multiple times: "--i, imax -= num_edges".
|
|
1110 |
// Fast DU iterator (see usage comment above): raw pointer arithmetic
// over the _out array, so insertions are disallowed while iterating.
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  // Back up over a deletion; verify_resync runs BEFORE the decrement.
  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  // Shrink after deleting n copies of an edge; applied to the limit only.
  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};
|
|
1155 |
|
|
1156 |
// Begin fast iteration: stores the one-past-the-end iterator into the
// reference argument imax and returns the begin iterator.
DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
// Current use; ASSERT builds record it as i._last for deletion checks.
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
|
|
1166 |
|
|
1167 |
|
|
1168 |
// Faster DU iterator. Requires each successive edge to be removed.
|
|
1169 |
// Does not allow insertion of any edges.
|
|
1170 |
// Usage:
|
|
1171 |
// for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
|
|
1172 |
// Node* y = x->last_out(i);
|
|
1173 |
// ...
|
|
1174 |
// }
|
|
1175 |
// Compiles in product mode to raw Node** pointer arithmetic, with
|
|
1176 |
// no reloading of pointers from the original node x.
|
|
1177 |
// Backward DU iterator (see usage comment above); each produced edge
// must be removed before advancing.  Privately inherits the fast
// iterator's pointer machinery but hides forward iteration.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  // Forward motion is meaningless for this iterator; poison it.
  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  // initialize to garbage
  DUIterator_Last() { }

  // Step back by one edge (after removing it).
  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1)); }

  // Step back by n edges (after removing n copies of the same edge).
  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};
|
|
1212 |
|
|
1213 |
// Begin backward iteration: stores the lower limit (element 0) into
// imin and returns an iterator at the last out edge (_outcnt - 1).
DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
// Current use; ASSERT builds record it as i._last for deletion checks.
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
|
|
1223 |
|
|
1224 |
#endif //OPTO_DU_ITERATOR_ASSERT
|
|
1225 |
|
|
1226 |
#undef I_VDUI_ONLY
|
|
1227 |
#undef VDUI_ONLY
|
|
1228 |
|
|
1229 |
|
|
1230 |
//-----------------------------------------------------------------------------
|
|
1231 |
// Map dense integer indices to Nodes. Uses classic doubling-array trick.
|
|
1232 |
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
|
|
1233 |
// Note that the constructor just zeros things, and since I use Arena
|
|
1234 |
// allocation I do not need a destructor to reclaim storage.
|
|
1235 |
// Growable arena-allocated map from dense uint indices to Node*,
// defaulting to NULL (see header comment above).  No destructor is
// needed: the arena reclaims all storage.
class Node_Array : public ResourceObj {
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;                  // Current allocated capacity
  Node **_nodes;                // The mapping array itself
  void   grow( uint i );        // Grow array node to fit index i
public:
  // Allocate the initial OptoNodeListSize slots in arena a, all NULL.
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  // Shallow copy: shares the underlying _nodes storage with na.
  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  // Checked lookup: asserts i is within the allocated capacity.
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  // Raw access to the underlying array.
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.  Grows on demand.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i ); // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a ); // Zap mapping to empty; reclaim storage
  void clear(); // Set all entries to NULL, keep storage
  // Capacity, not element count (unmapped slots are NULL).
  uint Size() const { return _max; }
  void dump() const;
};
|
|
1264 |
|
|
1265 |
class Node_List : public Node_Array {
|
|
1266 |
uint _cnt;
|
|
1267 |
public:
|
|
1268 |
Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
|
|
1269 |
Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
|
|
1270 |
void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
|
|
1271 |
void remove( uint i ) { Node_Array::remove(i); _cnt--; }
|
|
1272 |
void push( Node *b ) { map(_cnt++,b); }
|
|
1273 |
void yank( Node *n ); // Find and remove
|
|
1274 |
Node *pop() { return _nodes[--_cnt]; }
|
|
1275 |
Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
|
|
1276 |
void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
|
|
1277 |
uint size() const { return _cnt; }
|
|
1278 |
void dump() const;
|
|
1279 |
};
|
|
1280 |
|
|
1281 |
//------------------------------Unique_Node_List-------------------------------
|
|
1282 |
// A Node_List that refuses duplicates: a VectorSet keyed by node _idx
// tracks membership, so push() is a no-op for nodes already present.
// Used as a worklist (e.g. for IGVN).
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;       // membership bitset, indexed by Node::_idx
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  // True if n is currently in the list.
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  // Add b only if not already present (test_set both tests and marks).
  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  // Round-robin pop: takes the element at _clock_index (wrapping at
  // size()), fills the hole with the last element, and clears the
  // popped node's membership bit (VectorSet operator>>= removes it).
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index++, Node_List::pop());
    _in_worklist >>= b->_idx;
    return b;
  }
  // Remove element i (filled from the end; order not preserved) and
  // clear its membership bit.
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  // Find-and-remove n, clearing its membership bit.
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
|
|
1324 |
|
|
1325 |
// Inline definition of Compile::record_for_igvn must be deferred to this point.
|
|
1326 |
// Queue n on the IGVN worklist (duplicates ignored by the
// Unique_Node_List) and also notify escape analysis.  Defined here
// because it needs the complete Unique_Node_List type (see comment
// above).
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
  record_for_escape_analysis(n);
}
|
|
1330 |
|
|
1331 |
//------------------------------Node_Stack-------------------------------------
|
|
1332 |
// Arena-allocated stack of (Node*, child-index) pairs, used for
// iterative (non-recursive) graph walks.  Grows on demand; top-of-
// stack is tracked by pointer, with _inodes - 1 meaning "empty".
class Node_Stack {
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  // Allocate at least OptoNodeListSize entries in the current
  // thread's resource area.
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  // Same, but in an explicit arena.
  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  // Push the pair (n, i); grows the backing array if full.
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  // Accessors for the top entry; no emptiness checks -- callers must
  // test is_nonempty() first.
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage
};
|
|
1392 |
|
|
1393 |
|
|
1394 |
//-----------------------------Node_Notes--------------------------------------
|
|
1395 |
// Debugging or profiling annotations loosely and sparsely associated
|
|
1396 |
// with some nodes. See Compile::node_notes_at for the accessor.
|
|
1397 |
class Node_Notes VALUE_OBJ_CLASS_SPEC {
|
|
1398 |
JVMState* _jvms;
|
|
1399 |
|
|
1400 |
public:
|
|
1401 |
Node_Notes(JVMState* jvms = NULL) {
|
|
1402 |
_jvms = jvms;
|
|
1403 |
}
|
|
1404 |
|
|
1405 |
JVMState* jvms() { return _jvms; }
|
|
1406 |
void set_jvms(JVMState* x) { _jvms = x; }
|
|
1407 |
|
|
1408 |
// True if there is nothing here.
|
|
1409 |
bool is_clear() {
|
|
1410 |
return (_jvms == NULL);
|
|
1411 |
}
|
|
1412 |
|
|
1413 |
// Make there be nothing here.
|
|
1414 |
void clear() {
|
|
1415 |
_jvms = NULL;
|
|
1416 |
}
|
|
1417 |
|
|
1418 |
// Make a new, clean node notes.
|
|
1419 |
static Node_Notes* make(Compile* C) {
|
|
1420 |
Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
|
|
1421 |
nn->clear();
|
|
1422 |
return nn;
|
|
1423 |
}
|
|
1424 |
|
|
1425 |
Node_Notes* clone(Compile* C) {
|
|
1426 |
Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
|
|
1427 |
(*nn) = (*this);
|
|
1428 |
return nn;
|
|
1429 |
}
|
|
1430 |
|
|
1431 |
// Absorb any information from source.
|
|
1432 |
bool update_from(Node_Notes* source) {
|
|
1433 |
bool changed = false;
|
|
1434 |
if (source != NULL) {
|
|
1435 |
if (source->jvms() != NULL) {
|
|
1436 |
set_jvms(source->jvms());
|
|
1437 |
changed = true;
|
|
1438 |
}
|
|
1439 |
}
|
|
1440 |
return changed;
|
|
1441 |
}
|
|
1442 |
};
|
|
1443 |
|
|
1444 |
// Inlined accessors for Compile::node_nodes that require the preceding class:
|
|
1445 |
// Locate the Node_Notes slot for node index idx inside the two-level
// array arr (an array of fixed-size blocks).  If the containing block
// does not yet exist: grow when can_grow, else return NULL.
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  // Which block holds idx (block size is 1 << _log2_node_notes_block_size).
  int block_idx = (idx >> _log2_node_notes_block_size);
  // >= 0 means block_idx is at or past the current end of arr.
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}
|
|
1458 |
|
|
1459 |
// Merge value into the notes slot for node idx; returns true if the
// stored notes changed.  Empty values are dropped early so the array
// never grows just to store nothing.
inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  // can_grow=true, so locate_node_notes cannot return NULL here.
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}
|
|
1467 |
|
|
1468 |
|
|
1469 |
//------------------------------TypeNode---------------------------------------
|
|
1470 |
// Node with a Type constant.
|
|
1471 |
// Node carrying a Type constant (see header comment above).  _type is
// declared const-qualified; set_type deliberately casts the constness
// away so the type can be narrowed in place during optimization.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // the attached Type; never NULL (see type())
public:
  // Replace the attached type.  In debug builds, if this node is
  // currently locked into the value-numbering hash table
  // (_hash_lock), its hash must not change as a result.
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;  // Print per-node info
#endif
};
|