author | kvn |
Tue, 15 Apr 2008 10:49:32 -0700 | |
changeset 258 | dbd6f2ed7ba0 |
parent 238 | 803c80713999 |
child 369 | 640d1cdd140f |
permissions | -rw-r--r-- |
1 | 1 |
/* |
2 |
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. |
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. |
|
8 |
* |
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
* accompanied this code). |
|
14 |
* |
|
15 |
* You should have received a copy of the GNU General Public License version |
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
* |
|
19 |
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
|
20 |
* CA 95054 USA or visit www.sun.com if you need additional information or |
|
21 |
* have any questions. |
|
22 |
* |
|
23 |
*/ |
|
24 |
||
25 |
// Portions of code courtesy of Clifford Click |
|
26 |
||
27 |
// Optimization - Graph Style |
|
28 |
||
29 |
||
30 |
class AbstractLockNode; |
|
31 |
class AddNode; |
|
32 |
class AddPNode; |
|
33 |
class AliasInfo; |
|
34 |
class AllocateArrayNode; |
|
35 |
class AllocateNode; |
|
36 |
class Block; |
|
37 |
class Block_Array; |
|
38 |
class BoolNode; |
|
39 |
class BoxLockNode; |
|
40 |
class CMoveNode; |
|
41 |
class CallDynamicJavaNode; |
|
42 |
class CallJavaNode; |
|
43 |
class CallLeafNode; |
|
44 |
class CallNode; |
|
45 |
class CallRuntimeNode; |
|
46 |
class CallStaticJavaNode; |
|
47 |
class CatchNode; |
|
48 |
class CatchProjNode; |
|
49 |
class CheckCastPPNode; |
|
50 |
class CmpNode; |
|
51 |
class CodeBuffer; |
|
52 |
class ConstraintCastNode; |
|
53 |
class ConNode; |
|
54 |
class CountedLoopNode; |
|
55 |
class CountedLoopEndNode; |
|
56 |
class FastLockNode; |
|
57 |
class FastUnlockNode; |
|
58 |
class IfNode; |
|
59 |
class InitializeNode; |
|
60 |
class JVMState; |
|
61 |
class JumpNode; |
|
62 |
class JumpProjNode; |
|
63 |
class LoadNode; |
|
64 |
class LoadStoreNode; |
|
65 |
class LockNode; |
|
66 |
class LoopNode; |
|
67 |
class MachCallDynamicJavaNode; |
|
68 |
class MachCallJavaNode; |
|
69 |
class MachCallLeafNode; |
|
70 |
class MachCallNode; |
|
71 |
class MachCallRuntimeNode; |
|
72 |
class MachCallStaticJavaNode; |
|
73 |
class MachIfNode; |
|
74 |
class MachNode; |
|
75 |
class MachNullCheckNode; |
|
76 |
class MachReturnNode; |
|
77 |
class MachSafePointNode; |
|
78 |
class MachSpillCopyNode; |
|
79 |
class MachTempNode; |
|
80 |
class Matcher; |
|
81 |
class MemBarNode; |
|
82 |
class MemNode; |
|
83 |
class MergeMemNode; |
|
84 |
class MulNode; |
|
85 |
class MultiNode; |
|
86 |
class MultiBranchNode; |
|
87 |
class NeverBranchNode; |
|
88 |
class Node; |
|
89 |
class Node_Array; |
|
90 |
class Node_List; |
|
91 |
class Node_Stack; |
|
92 |
class NullCheckNode; |
|
93 |
class OopMap; |
|
206 | 94 |
class ParmNode; |
1 | 95 |
class PCTableNode; |
96 |
class PhaseCCP; |
|
97 |
class PhaseGVN; |
|
98 |
class PhaseIterGVN; |
|
99 |
class PhaseRegAlloc; |
|
100 |
class PhaseTransform; |
|
101 |
class PhaseValues; |
|
102 |
class PhiNode; |
|
103 |
class Pipeline; |
|
104 |
class ProjNode; |
|
105 |
class RegMask; |
|
106 |
class RegionNode; |
|
107 |
class RootNode; |
|
108 |
class SafePointNode; |
|
236
9a04268c8eea
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
213
diff
changeset
|
109 |
class SafePointScalarObjectNode; |
1 | 110 |
class StartNode; |
111 |
class State; |
|
112 |
class StoreNode; |
|
113 |
class SubNode; |
|
114 |
class Type; |
|
115 |
class TypeNode; |
|
116 |
class UnlockNode; |
|
117 |
class VectorSet; |
|
118 |
class IfTrueNode; |
|
119 |
class IfFalseNode; |
|
120 |
typedef void (*NFunc)(Node&,void*); |
|
121 |
extern "C" { |
|
122 |
typedef int (*C_sort_func_t)(const void *, const void *); |
|
123 |
} |
|
124 |
||
125 |
// The type of all node counts and indexes. |
|
126 |
// It must hold at least 16 bits, but must also be fast to load and store. |
|
127 |
// This type, if less than 32 bits, could limit the number of possible nodes. |
|
128 |
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.) |
|
129 |
typedef unsigned int node_idx_t; |
|
130 |
||
131 |
||
132 |
#ifndef OPTO_DU_ITERATOR_ASSERT |
|
133 |
#ifdef ASSERT |
|
134 |
#define OPTO_DU_ITERATOR_ASSERT 1 |
|
135 |
#else |
|
136 |
#define OPTO_DU_ITERATOR_ASSERT 0 |
|
137 |
#endif |
|
138 |
#endif //OPTO_DU_ITERATOR_ASSERT |
|
139 |
||
140 |
#if OPTO_DU_ITERATOR_ASSERT |
|
141 |
class DUIterator; |
|
142 |
class DUIterator_Fast; |
|
143 |
class DUIterator_Last; |
|
144 |
#else |
|
145 |
typedef uint DUIterator; |
|
146 |
typedef Node** DUIterator_Fast; |
|
147 |
typedef Node** DUIterator_Last; |
|
148 |
#endif |
|
149 |
||
150 |
// Node Sentinel |
|
151 |
#define NodeSentinel (Node*)-1 |
|
152 |
||
153 |
// Unknown count frequency |
|
154 |
#define COUNT_UNKNOWN (-1.0f) |
|
155 |
||
156 |
//------------------------------Node------------------------------------------- |
|
157 |
// Nodes define actions in the program. They create values, which have types. |
|
158 |
// They are both vertices in a directed graph and program primitives. Nodes |
|
159 |
// are labeled; the label is the "opcode", the primitive function in the lambda |
|
160 |
// calculus sense that gives meaning to the Node. Node inputs are ordered (so |
|
161 |
// that "a-b" is different from "b-a"). The inputs to a Node are the inputs to |
|
162 |
// the Node's function. These inputs also define a Type equation for the Node. |
|
163 |
// Solving these Type equations amounts to doing dataflow analysis. |
|
164 |
// Control and data are uniformly represented in the graph. Finally, Nodes |
|
165 |
// have a unique dense integer index which is used to index into side arrays |
|
166 |
// whenever I have phase-specific information. |
|
167 |
||
168 |
class Node { |
|
169 |
// Lots of restrictions on cloning Nodes |
|
170 |
Node(const Node&); // not defined; linker error to use these |
|
171 |
Node &operator=(const Node &rhs); |
|
172 |
||
173 |
public: |
|
174 |
friend class Compile; |
|
175 |
#if OPTO_DU_ITERATOR_ASSERT |
|
176 |
friend class DUIterator_Common; |
|
177 |
friend class DUIterator; |
|
178 |
friend class DUIterator_Fast; |
|
179 |
friend class DUIterator_Last; |
|
180 |
#endif |
|
181 |
||
182 |
// Because Nodes come and go, I define an Arena of Node structures to pull |
|
183 |
// from. This should allow fast access to node creation & deletion. This |
|
184 |
// field is a local cache of a value defined in some "program fragment" for |
|
185 |
// which these Nodes are just a part of. |
|
186 |
||
187 |
// New Operator that takes a Compile pointer, this will eventually |
|
188 |
// be the "new" New operator. |
|
189 |
// Arena-based placement allocation: carve the Node out of the Compile's
// node arena.  Storage is reclaimed wholesale with the arena, which is
// why the matching operator delete below is a no-op.
inline void* operator new( size_t x, Compile* C) {
  Node* node = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
  node->_in = (Node**)node;   // magic cookie for assertion check
#endif
  node->_out = (Node**)C;
  return (void*)node;
}
|
197 |
||
198 |
// New Operator that takes a Compile pointer, this will eventually |
|
199 |
// be the "new" New operator. |
|
200 |
// Edge-count form of the arena new: allocates room for 'y' input-edge
// slots immediately after the node body and points _in at that trailing
// array.  Used as e.g. new (C,3) FooNode(...).
inline void* operator new( size_t x, Compile* C, int y) {
  Node* node = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*));
  node->_in = (Node**)(((char*)node) + x);
#ifdef ASSERT
  node->_in[y-1] = node;   // magic cookie for assertion check
#endif
  node->_out = (Node**)C;
  return (void*)node;
}
|
209 |
||
210 |
// Delete is a NOP |
|
211 |
void operator delete( void *ptr ) {} |
|
212 |
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage |
|
213 |
void destruct(); |
|
214 |
||
215 |
// Create a new Node. Required is the number is of inputs required for |
|
216 |
// semantic correctness. |
|
217 |
Node( uint required ); |
|
218 |
||
219 |
// Create a new Node with given input edges. |
|
220 |
// This version requires use of the "edge-count" new. |
|
221 |
// E.g. new (C,3) FooNode( C, NULL, left, right ); |
|
222 |
Node( Node *n0 ); |
|
223 |
Node( Node *n0, Node *n1 ); |
|
224 |
Node( Node *n0, Node *n1, Node *n2 ); |
|
225 |
Node( Node *n0, Node *n1, Node *n2, Node *n3 ); |
|
226 |
Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 ); |
|
227 |
Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 ); |
|
228 |
Node( Node *n0, Node *n1, Node *n2, Node *n3, |
|
229 |
Node *n4, Node *n5, Node *n6 ); |
|
230 |
||
231 |
// Clone an inherited Node given only the base Node type. |
|
232 |
Node* clone() const; |
|
233 |
||
234 |
// Clone a Node, immediately supplying one or two new edges. |
|
235 |
// The first and second arguments, if non-null, replace in(1) and in(2), |
|
236 |
// respectively. |
|
237 |
// Clone this node, then immediately overwrite in(1) with 'in1' and
// in(2) with 'in2'.  A NULL argument leaves that input as cloned.
Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
  Node* copy = clone();
  if (in1 != NULL) {
    copy->set_req(1, in1);
  }
  if (in2 != NULL) {
    copy->set_req(2, in2);
  }
  return copy;
}
|
243 |
||
244 |
private: |
|
245 |
// Shared setup for the above constructors. |
|
246 |
// Handles all interactions with Compile::current. |
|
247 |
// Puts initial values in all Node fields except _idx. |
|
248 |
// Returns the initial value for _idx, which cannot |
|
249 |
// be initialized by assignment. |
|
250 |
inline int Init(int req, Compile* C); |
|
251 |
||
252 |
//----------------- input edge handling |
|
253 |
protected: |
|
254 |
friend class PhaseCFG; // Access to address of _in array elements |
|
255 |
Node **_in; // Array of use-def references to Nodes |
|
256 |
Node **_out; // Array of def-use references to Nodes |
|
257 |
||
258 |
// Input edges are split into two categories. Required edges are required |
|
259 |
// for semantic correctness; order is important and NULLs are allowed. |
|
260 |
// Precedence edges are used to help determine execution order and are |
|
261 |
// added, e.g., for scheduling purposes. They are unordered and not |
|
262 |
// duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1 |
|
263 |
// are required, from _cnt to _max-1 are precedence edges. |
|
264 |
node_idx_t _cnt; // Total number of required Node inputs. |
|
265 |
||
266 |
node_idx_t _max; // Actual length of input array. |
|
267 |
||
268 |
// Output edges are an unordered list of def-use edges which exactly |
|
269 |
// correspond to required input edges which point from other nodes |
|
270 |
// to this one. Thus the count of the output edges is the number of |
|
271 |
// users of this node. |
|
272 |
node_idx_t _outcnt; // Total number of Node outputs. |
|
273 |
||
274 |
node_idx_t _outmax; // Actual length of output array. |
|
275 |
||
276 |
// Grow the actual input array to the next larger power-of-2 bigger than len. |
|
277 |
void grow( uint len ); |
|
278 |
// Grow the output array to the next larger power-of-2 bigger than len. |
|
279 |
void out_grow( uint len ); |
|
280 |
||
281 |
public: |
|
282 |
// Each Node is assigned a unique small/dense number. This number is used |
|
283 |
// to index into auxiliary arrays of data and bitvectors. |
|
284 |
// It is declared const to defend against inadvertent assignment, |
|
285 |
// since it is used by clients as a naked field. |
|
286 |
const node_idx_t _idx; |
|
287 |
||
288 |
// Get the (read-only) number of input edges |
|
289 |
uint req() const { return _cnt; } |
|
290 |
uint len() const { return _max; } |
|
291 |
// Get the (read-only) number of output edges |
|
292 |
uint outcnt() const { return _outcnt; } |
|
293 |
||
294 |
#if OPTO_DU_ITERATOR_ASSERT |
|
295 |
// Iterate over the out-edges of this node. Deletions are illegal. |
|
296 |
inline DUIterator outs() const; |
|
297 |
// Use this when the out array might have changed to suppress asserts. |
|
298 |
inline DUIterator& refresh_out_pos(DUIterator& i) const; |
|
299 |
// Does the node have an out at this position? (Used for iteration.) |
|
300 |
inline bool has_out(DUIterator& i) const; |
|
301 |
inline Node* out(DUIterator& i) const; |
|
302 |
// Iterate over the out-edges of this node. All changes are illegal. |
|
303 |
inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const; |
|
304 |
inline Node* fast_out(DUIterator_Fast& i) const; |
|
305 |
// Iterate over the out-edges of this node, deleting one at a time. |
|
306 |
inline DUIterator_Last last_outs(DUIterator_Last& min) const; |
|
307 |
inline Node* last_out(DUIterator_Last& i) const; |
|
308 |
// The inline bodies of all these methods are after the iterator definitions. |
|
309 |
#else |
|
310 |
// Iterate over the out-edges of this node. Deletions are illegal. |
|
311 |
// This iteration uses integral indexes, to decouple from array reallocations. |
|
312 |
DUIterator outs() const { return 0; } |
|
313 |
// Use this when the out array might have changed to suppress asserts. |
|
314 |
DUIterator refresh_out_pos(DUIterator i) const { return i; } |
|
315 |
||
316 |
// Reference to the i'th output Node. Error if out of bounds. |
|
317 |
Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; } |
|
318 |
// Does the node have an out at this position? (Used for iteration.) |
|
319 |
bool has_out(DUIterator i) const { return i < _outcnt; } |
|
320 |
||
321 |
// Iterate over the out-edges of this node. All changes are illegal. |
|
322 |
// This iteration uses a pointer internal to the out array. |
|
323 |
// Begin a fast out-edge walk: stores the one-past-the-end limit pointer
// into 'max' and returns the base of the out array.  The out array must
// not change while the iteration is in progress.
DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
  Node** out = _out;
  // Assign a limit pointer to the reference argument:
  max = out + (ptrdiff_t)_outcnt;
  // Return the base pointer:
  return out;
}
|
330 |
Node* fast_out(DUIterator_Fast i) const { return *i; } |
|
331 |
// Iterate over the out-edges of this node, deleting one at a time. |
|
332 |
// This iteration uses a pointer internal to the out array. |
|
333 |
// Begin a backwards out-edge walk (used when edges may be deleted as
// they are visited): stores the array base into 'min' and returns a
// pointer to the last out-edge.
DUIterator_Last last_outs(DUIterator_Last& min) const {
  Node** out = _out;
  // Assign a limit pointer to the reference argument:
  min = out;
  // Return the pointer to the start of the iteration:
  return out + (ptrdiff_t)_outcnt - 1;
}
|
340 |
Node* last_out(DUIterator_Last i) const { return *i; } |
|
341 |
#endif |
|
342 |
||
343 |
// Reference to the i'th input Node. Error if out of bounds. |
|
344 |
Node* in(uint i) const { assert(i < _max,"oob"); return _in[i]; } |
|
345 |
// Reference to the i'th output Node. Error if out of bounds. |
|
346 |
// Use this accessor sparingly. We are trying to use iterators instead. |
|
347 |
Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } |
|
348 |
// Return the unique out edge. |
|
349 |
Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; } |
|
350 |
// Delete out edge at position 'i' by moving last out edge to position 'i' |
|
351 |
// Delete the out-edge at position 'i' in O(1) by moving the last
// out-edge into its slot.  Does NOT update the input side of the edge;
// callers are responsible for keeping def-use info consistent.
void raw_del_out(uint i) {
  assert(i < _outcnt,"oob");
  assert(_outcnt > 0,"oob");
#if OPTO_DU_ITERATOR_ASSERT
  // Record that a change happened here.
  debug_only(_last_del = _out[i]; ++_del_tick);
#endif
  // Overwrite the vacated slot with the last live edge, shrinking the count.
  _out[i] = _out[--_outcnt];
  // Smash the old edge so it can't be used accidentally.
  debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
}
|
362 |
||
363 |
#ifdef ASSERT |
|
364 |
bool is_dead() const; |
|
365 |
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead())) |
|
366 |
#endif |
|
367 |
||
368 |
// Set a required input edge, also updates corresponding output edge |
|
369 |
void add_req( Node *n ); // Append a NEW required input |
|
370 |
void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). |
|
371 |
void del_req( uint idx ); // Delete required edge & compact |
|
372 |
void ins_req( uint i, Node *n ); // Insert a NEW required input |
|
373 |
// Set required input edge 'i' to 'n', maintaining def-use info on both
// sides: the displaced input (if any) loses this node as a use, and 'n'
// (if non-NULL) gains it.
void set_req( uint i, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  assert( i < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  Node** p = &_in[i]; // cache this._in, across the del_out call
  if (*p != NULL) (*p)->del_out((Node *)this);
  (*p) = n;
  if (n != NULL) n->add_out((Node *)this);
}
|
383 |
// Light version of set_req() to init inputs after node creation. |
|
384 |
// Light version of set_req() for just-created nodes: the slot must
// still be NULL, so no old use needs to be disconnected.  The
// "i == 0 && this == n" escape permits a node to wire its own control
// input to itself during construction.
void init_req( uint i, Node *n ) {
  assert( i == 0 && this == n ||
          is_not_dead(n), "can not use dead node");
  assert( i < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  assert( _in[i] == NULL, "sanity");
  _in[i] = n;
  if (n != NULL) n->add_out((Node *)this);
}
|
394 |
// Find first occurrence of n among my edges: |
|
395 |
int find_edge(Node* n); |
|
396 |
int replace_edge(Node* old, Node* neww); |
|
397 |
// NULL out all inputs to eliminate incoming Def-Use edges. |
|
398 |
// Return the number of edges between 'n' and 'this' |
|
399 |
int disconnect_inputs(Node *n); |
|
400 |
||
401 |
// Quickly, return true if and only if I am Compile::current()->top(). |
|
402 |
// Quick test for Compile::current()->top(): top is distinguished by a
// NULL _out pointer (the invariant the assert cross-checks; it is
// re-established by setup_is_top).
bool is_top() const {
  assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
  return (_out == NULL);
}
|
406 |
// Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) |
|
407 |
void setup_is_top(); |
|
408 |
||
409 |
// Strip away casting. (It is depth-limited.) |
|
410 |
Node* uncast() const; |
|
411 |
||
412 |
private: |
|
413 |
static Node* uncast_helper(const Node* n); |
|
414 |
||
415 |
// Add an output edge to the end of the list |
|
416 |
// Append 'n' to this node's out-edge (use) list, growing the array if
// full.  TOP keeps no use list (its _out is NULL), so it is skipped.
void add_out( Node *n ) {
  if (is_top()) return;
  if( _outcnt == _outmax ) out_grow(_outcnt);
  _out[_outcnt++] = n;
}
|
421 |
// Delete an output edge |
|
422 |
// Remove one occurrence of 'n' from this node's out-edge list.  Scans
// backwards from the end; asserts if 'n' is not found.  The hole is
// filled by the last edge (order of out-edges is not significant).
void del_out( Node *n ) {
  if (is_top()) return;
  Node** outp = &_out[_outcnt];
  // Find and remove n
  do {
    assert(outp > _out, "Missing Def-Use edge");
  } while (*--outp != n);
  *outp = _out[--_outcnt];
  // Smash the old edge so it can't be used accidentally.
  debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  // Record that a change happened here.
#if OPTO_DU_ITERATOR_ASSERT
  debug_only(_last_del = n; ++_del_tick);
#endif
}
|
437 |
||
438 |
public: |
|
439 |
// Globally replace this node by a given new node, updating all uses. |
|
440 |
void replace_by(Node* new_node); |
|
441 |
void set_req_X( uint i, Node *n, PhaseIterGVN *igvn ); |
|
442 |
// Find the one non-null required input. RegionNode only |
|
443 |
Node *nonnull_req() const; |
|
444 |
// Add or remove precedence edges |
|
445 |
void add_prec( Node *n ); |
|
446 |
void rm_prec( uint i ); |
|
447 |
// Set precedence edge 'i' to 'n', maintaining def-use info.  Precedence
// edges live at indexes >= _cnt (after the required inputs).
void set_prec( uint i, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  assert( i >= _cnt, "not a precedence edge");
  if (_in[i] != NULL) _in[i]->del_out((Node *)this);
  _in[i] = n;
  if (n != NULL) n->add_out((Node *)this);
}
|
454 |
// Set this node's index, used by cisc_version to replace current node |
|
455 |
// Set this node's index; used by cisc_version to replace the current node.
// _idx is declared const to defend against stray writes, so this single
// sanctioned mutation point must strip const.  Use an explicit const_cast
// rather than a C-style cast so the const-removal is visible and greppable.
void set_idx(uint new_idx) {
  const node_idx_t* ref = &_idx;
  *const_cast<node_idx_t*>(ref) = new_idx;
}
|
459 |
// Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) |
|
460 |
// Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
// Both inputs still point at this node afterwards, so no def-use
// bookkeeping is needed -- only the hash invariant is checked.
void swap_edges(uint i1, uint i2) {
  debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
  // Def-Use info is unchanged
  Node* n1 = in(i1);
  Node* n2 = in(i2);
  _in[i1] = n2;
  _in[i2] = n1;
  // If this node is in the hash table, make sure it doesn't need a rehash.
  assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
}
|
470 |
||
471 |
// Iterators over input Nodes for a Node X are written as: |
|
472 |
// for( i = 0; i < X.req(); i++ ) ... X[i] ... |
|
473 |
// NOTE: Required edges can contain embedded NULL pointers. |
|
474 |
||
475 |
//----------------- Other Node Properties |
|
476 |
||
477 |
// Generate class id for some ideal nodes to avoid virtual query |
|
478 |
// methods is_<Node>(). |
|
479 |
// Class id is the set of bits corresponding to the node class and all its |
|
480 |
// super classes so that queries for super classes are also valid. |
|
481 |
// Subclasses of the same super class have different assigned bit |
|
482 |
// (the third parameter in the macro DEFINE_CLASS_ID). |
|
483 |
// Classes with deeper hierarchy are declared first. |
|
484 |
// Classes with the same hierarchy depth are sorted by usage frequency. |
|
485 |
// |
|
486 |
// The query method masks the bits to cut off bits of subclasses |
|
487 |
// and then compare the result with the class id |
|
488 |
// (see the macro DEFINE_CLASS_QUERY below). |
|
489 |
// |
|
490 |
// Class_MachCall=30, ClassMask_MachCall=31 |
|
491 |
// 12 8 4 0 |
|
492 |
// 0 0 0 0 0 0 0 0 1 1 1 1 0 |
|
493 |
// | | | | |
|
494 |
// | | | Bit_Mach=2 |
|
495 |
// | | Bit_MachReturn=4 |
|
496 |
// | Bit_MachSafePoint=8 |
|
497 |
// Bit_MachCall=16 |
|
498 |
// |
|
499 |
// Class_CountedLoop=56, ClassMask_CountedLoop=63 |
|
500 |
// 12 8 4 0 |
|
501 |
// 0 0 0 0 0 0 0 1 1 1 0 0 0 |
|
502 |
// | | | |
|
503 |
// | | Bit_Region=8 |
|
504 |
// | Bit_Loop=16 |
|
505 |
// Bit_CountedLoop=32 |
|
506 |
||
507 |
#define DEFINE_CLASS_ID(cl, supcl, subn) \ |
|
508 |
Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \ |
|
509 |
Class_##cl = Class_##supcl + Bit_##cl , \ |
|
510 |
ClassMask_##cl = ((Bit_##cl << 1) - 1) , |
|
511 |
||
512 |
// This enum is used only for C2 ideal and mach nodes with is_<node>() methods |
|
513 |
// so that its values fit into 16 bits. |
|
514 |
enum NodeClasses { |
|
515 |
Bit_Node = 0x0000, |
|
516 |
Class_Node = 0x0000, |
|
517 |
ClassMask_Node = 0xFFFF, |
|
518 |
||
519 |
DEFINE_CLASS_ID(Multi, Node, 0) |
|
520 |
DEFINE_CLASS_ID(SafePoint, Multi, 0) |
|
521 |
DEFINE_CLASS_ID(Call, SafePoint, 0) |
|
522 |
DEFINE_CLASS_ID(CallJava, Call, 0) |
|
523 |
DEFINE_CLASS_ID(CallStaticJava, CallJava, 0) |
|
524 |
DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1) |
|
525 |
DEFINE_CLASS_ID(CallRuntime, Call, 1) |
|
526 |
DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0) |
|
527 |
DEFINE_CLASS_ID(Allocate, Call, 2) |
|
528 |
DEFINE_CLASS_ID(AllocateArray, Allocate, 0) |
|
529 |
DEFINE_CLASS_ID(AbstractLock, Call, 3) |
|
530 |
DEFINE_CLASS_ID(Lock, AbstractLock, 0) |
|
531 |
DEFINE_CLASS_ID(Unlock, AbstractLock, 1) |
|
532 |
DEFINE_CLASS_ID(MultiBranch, Multi, 1) |
|
533 |
DEFINE_CLASS_ID(PCTable, MultiBranch, 0) |
|
534 |
DEFINE_CLASS_ID(Catch, PCTable, 0) |
|
535 |
DEFINE_CLASS_ID(Jump, PCTable, 1) |
|
536 |
DEFINE_CLASS_ID(If, MultiBranch, 1) |
|
537 |
DEFINE_CLASS_ID(CountedLoopEnd, If, 0) |
|
538 |
DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2) |
|
539 |
DEFINE_CLASS_ID(Start, Multi, 2) |
|
540 |
DEFINE_CLASS_ID(MemBar, Multi, 3) |
|
541 |
DEFINE_CLASS_ID(Initialize, MemBar, 0) |
|
542 |
||
543 |
DEFINE_CLASS_ID(Mach, Node, 1) |
|
544 |
DEFINE_CLASS_ID(MachReturn, Mach, 0) |
|
545 |
DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0) |
|
546 |
DEFINE_CLASS_ID(MachCall, MachSafePoint, 0) |
|
547 |
DEFINE_CLASS_ID(MachCallJava, MachCall, 0) |
|
548 |
DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0) |
|
549 |
DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1) |
|
550 |
DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1) |
|
551 |
DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0) |
|
552 |
DEFINE_CLASS_ID(MachSpillCopy, Mach, 1) |
|
553 |
DEFINE_CLASS_ID(MachNullCheck, Mach, 2) |
|
554 |
DEFINE_CLASS_ID(MachIf, Mach, 3) |
|
555 |
DEFINE_CLASS_ID(MachTemp, Mach, 4) |
|
556 |
||
557 |
DEFINE_CLASS_ID(Proj, Node, 2) |
|
558 |
DEFINE_CLASS_ID(CatchProj, Proj, 0) |
|
559 |
DEFINE_CLASS_ID(JumpProj, Proj, 1) |
|
560 |
DEFINE_CLASS_ID(IfTrue, Proj, 2) |
|
561 |
DEFINE_CLASS_ID(IfFalse, Proj, 3) |
|
206 | 562 |
DEFINE_CLASS_ID(Parm, Proj, 4) |
1 | 563 |
|
564 |
DEFINE_CLASS_ID(Region, Node, 3) |
|
565 |
DEFINE_CLASS_ID(Loop, Region, 0) |
|
566 |
DEFINE_CLASS_ID(Root, Loop, 0) |
|
567 |
DEFINE_CLASS_ID(CountedLoop, Loop, 1) |
|
568 |
||
569 |
DEFINE_CLASS_ID(Sub, Node, 4) |
|
570 |
DEFINE_CLASS_ID(Cmp, Sub, 0) |
|
571 |
DEFINE_CLASS_ID(FastLock, Cmp, 0) |
|
572 |
DEFINE_CLASS_ID(FastUnlock, Cmp, 1) |
|
573 |
||
574 |
DEFINE_CLASS_ID(Type, Node, 5) |
|
575 |
DEFINE_CLASS_ID(Phi, Type, 0) |
|
576 |
DEFINE_CLASS_ID(ConstraintCast, Type, 1) |
|
577 |
DEFINE_CLASS_ID(CheckCastPP, Type, 2) |
|
578 |
DEFINE_CLASS_ID(CMove, Type, 3) |
|
236
9a04268c8eea
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
213
diff
changeset
|
579 |
DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) |
1 | 580 |
|
581 |
DEFINE_CLASS_ID(Mem, Node, 6) |
|
582 |
DEFINE_CLASS_ID(Load, Mem, 0) |
|
583 |
DEFINE_CLASS_ID(Store, Mem, 1) |
|
584 |
DEFINE_CLASS_ID(LoadStore, Mem, 2) |
|
585 |
||
586 |
DEFINE_CLASS_ID(MergeMem, Node, 7) |
|
587 |
DEFINE_CLASS_ID(Bool, Node, 8) |
|
588 |
DEFINE_CLASS_ID(AddP, Node, 9) |
|
589 |
DEFINE_CLASS_ID(BoxLock, Node, 10) |
|
590 |
DEFINE_CLASS_ID(Add, Node, 11) |
|
591 |
DEFINE_CLASS_ID(Mul, Node, 12) |
|
592 |
||
593 |
_max_classes = ClassMask_Mul |
|
594 |
}; |
|
595 |
#undef DEFINE_CLASS_ID |
|
596 |
||
597 |
// Flags are sorted by usage frequency. |
|
598 |
enum NodeFlags { |
|
599 |
Flag_is_Copy = 0x01, // should be first bit to avoid shift |
|
600 |
Flag_is_Call = Flag_is_Copy << 1, |
|
601 |
Flag_rematerialize = Flag_is_Call << 1, |
|
602 |
Flag_needs_anti_dependence_check = Flag_rematerialize << 1, |
|
603 |
Flag_is_macro = Flag_needs_anti_dependence_check << 1, |
|
604 |
Flag_is_Con = Flag_is_macro << 1, |
|
605 |
Flag_is_cisc_alternate = Flag_is_Con << 1, |
|
606 |
Flag_is_Branch = Flag_is_cisc_alternate << 1, |
|
607 |
Flag_is_block_start = Flag_is_Branch << 1, |
|
608 |
Flag_is_Goto = Flag_is_block_start << 1, |
|
609 |
Flag_is_dead_loop_safe = Flag_is_Goto << 1, |
|
610 |
Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1, |
|
611 |
Flag_is_safepoint_node = Flag_may_be_short_branch << 1, |
|
612 |
Flag_is_pc_relative = Flag_is_safepoint_node << 1, |
|
613 |
Flag_is_Vector = Flag_is_pc_relative << 1, |
|
614 |
_max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination |
|
615 |
}; |
|
616 |
||
617 |
private: |
|
618 |
jushort _class_id; |
|
619 |
jushort _flags; |
|
620 |
||
621 |
protected: |
|
622 |
// These methods should be called from constructors only. |
|
623 |
// Record this node's class id (a cumulative bit pattern built by
// DEFINE_CLASS_ID).  Constructor-only: assigned, not OR-ed.
void init_class_id(jushort c) {
  assert(c <= _max_classes, "invalid node class");
  _class_id = c; // cast out const
}
|
627 |
// OR additional flag bits into _flags.  Constructor-only.
void init_flags(jushort fl) {
  assert(fl <= _max_flags, "invalid node flag");
  _flags |= fl;
}
|
631 |
// Clear the given flag bits from _flags.  Constructor-only.
void clear_flag(jushort fl) {
  assert(fl <= _max_flags, "invalid node flag");
  _flags &= ~fl;
}
|
635 |
||
636 |
public: |
|
637 |
const jushort class_id() const { return _class_id; } |
|
638 |
||
639 |
const jushort flags() const { return _flags; } |
|
640 |
||
641 |
// Return a dense integer opcode number |
|
642 |
virtual int Opcode() const; |
|
643 |
||
644 |
// Virtual inherited Node size |
|
645 |
virtual uint size_of() const; |
|
646 |
||
647 |
// Other interesting Node properties |
|
648 |
||
649 |
// Special case: is_Call() returns true for both CallNode and MachCallNode. |
|
650 |
// Flag-based (not class-id) query, so both ideal CallNodes and
// MachCallNodes answer true.
bool is_Call() const {
  return (_flags & Flag_is_Call) != 0;
}
|
653 |
||
654 |
// Checked downcast to CallNode.  Asserts on the class-id bits rather
// than is_Call(), because the flag would also accept MachCallNode.
CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
  assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
  return (CallNode*)this;
}
|
658 |
||
659 |
#define DEFINE_CLASS_QUERY(type) \ |
|
660 |
bool is_##type() const { \ |
|
661 |
return ((_class_id & ClassMask_##type) == Class_##type); \ |
|
662 |
} \ |
|
663 |
type##Node *as_##type() const { \ |
|
664 |
assert(is_##type(), "invalid node class"); \ |
|
665 |
return (type##Node*)this; \ |
|
666 |
} |
|
667 |
||
668 |
DEFINE_CLASS_QUERY(AbstractLock) |
|
669 |
DEFINE_CLASS_QUERY(Add) |
|
670 |
DEFINE_CLASS_QUERY(AddP) |
|
671 |
DEFINE_CLASS_QUERY(Allocate) |
|
672 |
DEFINE_CLASS_QUERY(AllocateArray) |
|
673 |
DEFINE_CLASS_QUERY(Bool) |
|
674 |
DEFINE_CLASS_QUERY(BoxLock) |
|
675 |
DEFINE_CLASS_QUERY(CallDynamicJava) |
|
676 |
DEFINE_CLASS_QUERY(CallJava) |
|
677 |
DEFINE_CLASS_QUERY(CallLeaf) |
|
678 |
DEFINE_CLASS_QUERY(CallRuntime) |
|
679 |
DEFINE_CLASS_QUERY(CallStaticJava) |
|
680 |
DEFINE_CLASS_QUERY(Catch) |
|
681 |
DEFINE_CLASS_QUERY(CatchProj) |
|
682 |
DEFINE_CLASS_QUERY(CheckCastPP) |
|
683 |
DEFINE_CLASS_QUERY(ConstraintCast) |
|
684 |
DEFINE_CLASS_QUERY(CMove) |
|
685 |
DEFINE_CLASS_QUERY(Cmp) |
|
686 |
DEFINE_CLASS_QUERY(CountedLoop) |
|
687 |
DEFINE_CLASS_QUERY(CountedLoopEnd) |
|
688 |
DEFINE_CLASS_QUERY(FastLock) |
|
689 |
DEFINE_CLASS_QUERY(FastUnlock) |
|
690 |
DEFINE_CLASS_QUERY(If) |
|
691 |
DEFINE_CLASS_QUERY(IfFalse) |
|
692 |
DEFINE_CLASS_QUERY(IfTrue) |
|
693 |
DEFINE_CLASS_QUERY(Initialize) |
|
694 |
DEFINE_CLASS_QUERY(Jump) |
|
695 |
DEFINE_CLASS_QUERY(JumpProj) |
|
696 |
DEFINE_CLASS_QUERY(Load) |
|
697 |
DEFINE_CLASS_QUERY(LoadStore) |
|
698 |
DEFINE_CLASS_QUERY(Lock) |
|
699 |
DEFINE_CLASS_QUERY(Loop) |
|
700 |
DEFINE_CLASS_QUERY(Mach) |
|
701 |
DEFINE_CLASS_QUERY(MachCall) |
|
702 |
DEFINE_CLASS_QUERY(MachCallDynamicJava) |
|
703 |
DEFINE_CLASS_QUERY(MachCallJava) |
|
704 |
DEFINE_CLASS_QUERY(MachCallLeaf) |
|
705 |
DEFINE_CLASS_QUERY(MachCallRuntime) |
|
706 |
DEFINE_CLASS_QUERY(MachCallStaticJava) |
|
707 |
DEFINE_CLASS_QUERY(MachIf) |
|
708 |
DEFINE_CLASS_QUERY(MachNullCheck) |
|
709 |
DEFINE_CLASS_QUERY(MachReturn) |
|
710 |
DEFINE_CLASS_QUERY(MachSafePoint) |
|
711 |
DEFINE_CLASS_QUERY(MachSpillCopy) |
|
712 |
DEFINE_CLASS_QUERY(MachTemp) |
|
713 |
DEFINE_CLASS_QUERY(Mem) |
|
714 |
DEFINE_CLASS_QUERY(MemBar) |
|
715 |
DEFINE_CLASS_QUERY(MergeMem) |
|
716 |
DEFINE_CLASS_QUERY(Mul) |
|
717 |
DEFINE_CLASS_QUERY(Multi) |
|
718 |
DEFINE_CLASS_QUERY(MultiBranch) |
|
206 | 719 |
DEFINE_CLASS_QUERY(Parm) |
1 | 720 |
DEFINE_CLASS_QUERY(PCTable) |
721 |
DEFINE_CLASS_QUERY(Phi) |
|
722 |
DEFINE_CLASS_QUERY(Proj) |
|
723 |
DEFINE_CLASS_QUERY(Region) |
|
724 |
DEFINE_CLASS_QUERY(Root) |
|
725 |
DEFINE_CLASS_QUERY(SafePoint) |
|
236
9a04268c8eea
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
213
diff
changeset
|
726 |
DEFINE_CLASS_QUERY(SafePointScalarObject) |
1 | 727 |
DEFINE_CLASS_QUERY(Start) |
728 |
DEFINE_CLASS_QUERY(Store) |
|
729 |
DEFINE_CLASS_QUERY(Sub) |
|
730 |
DEFINE_CLASS_QUERY(Type) |
|
731 |
DEFINE_CLASS_QUERY(Unlock) |
|
732 |
||
733 |
#undef DEFINE_CLASS_QUERY |
|
734 |
||
735 |
// duplicate of is_MachSpillCopy() |
|
736 |
// Convenience alias: performs the same class-id test as is_MachSpillCopy().
bool is_SpillCopy () const {
  return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
}
|
739 |
||
740 |
bool is_Con () const { return (_flags & Flag_is_Con) != 0; } |
|
741 |
bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; } |
|
742 |
// The data node which is safe to leave in dead loop during IGVN optimization. |
|
743 |
bool is_dead_loop_safe() const { |
|
744 |
return is_Phi() || is_Proj() || |
|
745 |
(_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0; |
|
746 |
} |
|
747 |
||
748 |
// is_Copy() returns copied edge index (0 or 1) |
|
749 |
uint is_Copy() const { return (_flags & Flag_is_Copy); } |
|
750 |
||
751 |
virtual bool is_CFG() const { return false; } |
|
752 |
||
753 |
// If this node is control-dependent on a test, can it be |
|
754 |
// rerouted to a dominating equivalent test? This is usually |
|
755 |
// true of non-CFG nodes, but can be false for operations which |
|
756 |
// depend for their correct sequencing on more than one test. |
|
757 |
// (In that case, hoisting to a dominating test may silently |
|
758 |
// skip some other important test.) |
|
759 |
virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }; |
|
760 |
||
761 |
// defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd' |
|
762 |
bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; } |
|
763 |
||
764 |
// When building basic blocks, I need to have a notion of block beginning |
|
765 |
// Nodes, next block selector Nodes (block enders), and next block |
|
766 |
// projections. These calls need to work on their machine equivalents. The |
|
767 |
// Ideal beginning Nodes are RootNode, RegionNode and StartNode. |
|
768 |
bool is_block_start() const { |
|
769 |
if ( is_Region() ) |
|
770 |
return this == (const Node*)in(0); |
|
771 |
else |
|
772 |
return (_flags & Flag_is_block_start) != 0; |
|
773 |
} |
|
774 |
||
775 |
// The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root, |
|
776 |
// Goto and Return. This call also returns the block ending Node. |
|
777 |
virtual const Node *is_block_proj() const; |
|
778 |
||
779 |
// The node is a "macro" node which needs to be expanded before matching |
|
780 |
bool is_macro() const { return (_flags & Flag_is_macro) != 0; } |
|
781 |
||
782 |
// Value is a vector of primitive values |
|
783 |
bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; } |
|
784 |
||
785 |
//----------------- Optimization |
|
786 |
||
787 |
// Get the worst-case Type output for this Node. |
|
788 |
virtual const class Type *bottom_type() const; |
|
789 |
||
790 |
// If we find a better type for a node, try to record it permanently. |
|
791 |
// Return true if this node actually changed. |
|
792 |
// Be sure to do the hash_delete game in the "rehash" variant. |
|
793 |
void raise_bottom_type(const Type* new_type); |
|
794 |
||
795 |
// Get the address type with which this node uses and/or defs memory, |
|
796 |
// or NULL if none. The address type is conservatively wide. |
|
797 |
// Returns non-null for calls, membars, loads, stores, etc. |
|
798 |
// Returns TypePtr::BOTTOM if the node touches memory "broadly". |
|
799 |
virtual const class TypePtr *adr_type() const { return NULL; } |
|
800 |
||
801 |
// Return an existing node which computes the same function as this node. |
|
802 |
// The optimistic combined algorithm requires this to return a Node which |
|
803 |
// is a small number of steps away (e.g., one of my inputs). |
|
804 |
virtual Node *Identity( PhaseTransform *phase ); |
|
805 |
||
806 |
// Return the set of values this Node can take on at runtime. |
|
807 |
virtual const Type *Value( PhaseTransform *phase ) const; |
|
808 |
||
809 |
// Return a node which is more "ideal" than the current node. |
|
810 |
// The invariants on this call are subtle. If in doubt, read the |
|
811 |
// treatise in node.cpp above the default implemention AND TEST WITH |
|
812 |
// +VerifyIterativeGVN! |
|
813 |
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
|
814 |
||
815 |
// Some nodes have specific Ideal subgraph transformations only if they are |
|
816 |
// unique users of specific nodes. Such nodes should be put on IGVN worklist |
|
817 |
// for the transformations to happen. |
|
818 |
bool has_special_unique_user() const; |
|
819 |
||
258
dbd6f2ed7ba0
6692301: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
238
diff
changeset
|
820 |
// Skip Proj and CatchProj nodes chains. Check for Null and Top. |
dbd6f2ed7ba0
6692301: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
238
diff
changeset
|
821 |
Node* find_exact_control(Node* ctrl); |
dbd6f2ed7ba0
6692301: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
238
diff
changeset
|
822 |
|
dbd6f2ed7ba0
6692301: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
238
diff
changeset
|
823 |
// Check if 'this' node dominates or equal to 'sub'. |
dbd6f2ed7ba0
6692301: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
238
diff
changeset
|
824 |
bool dominates(Node* sub, Node_List &nlist); |
dbd6f2ed7ba0
6692301: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
238
diff
changeset
|
825 |
|
1 | 826 |
protected: |
827 |
bool remove_dead_region(PhaseGVN *phase, bool can_reshape); |
|
828 |
public: |
|
829 |
||
830 |
// Idealize graph, using DU info. Done after constant propagation |
|
831 |
virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); |
|
832 |
||
833 |
// See if there is valid pipeline info |
|
834 |
static const Pipeline *pipeline_class(); |
|
835 |
virtual const Pipeline *pipeline() const; |
|
836 |
||
837 |
// Compute the latency from the def to this instruction of the ith input node |
|
838 |
uint latency(uint i); |
|
839 |
||
840 |
// Hash & compare functions, for pessimistic value numbering |
|
841 |
||
842 |
// If the hash function returns the special sentinel value NO_HASH, |
|
843 |
// the node is guaranteed never to compare equal to any other node. |
|
844 |
// If we accidently generate a hash with value NO_HASH the node |
|
845 |
// won't go into the table and we'll lose a little optimization. |
|
846 |
enum { NO_HASH = 0 }; |
|
847 |
virtual uint hash() const; |
|
848 |
virtual uint cmp( const Node &n ) const; |
|
849 |
||
850 |
// Operation appears to be iteratively computed (such as an induction variable) |
|
851 |
// It is possible for this operation to return false for a loop-varying |
|
852 |
// value, if it appears (by local graph inspection) to be computed by a simple conditional. |
|
853 |
bool is_iteratively_computed(); |
|
854 |
||
855 |
// Determine if a node is Counted loop induction variable. |
|
856 |
// The method is defined in loopnode.cpp. |
|
857 |
const Node* is_loop_iv() const; |
|
858 |
||
859 |
// Return a node with opcode "opc" and same inputs as "this" if one can |
|
860 |
// be found; Otherwise return NULL; |
|
861 |
Node* find_similar(int opc); |
|
862 |
||
863 |
// Return the unique control out if only one. Null if none or more than one. |
|
864 |
Node* unique_ctrl_out(); |
|
865 |
||
866 |
//----------------- Code Generation |
|
867 |
||
868 |
// Ideal register class for Matching. Zero means unmatched instruction |
|
869 |
// (these are cloned instead of converted to machine nodes). |
|
870 |
virtual uint ideal_reg() const; |
|
871 |
||
872 |
static const uint NotAMachineReg; // must be > max. machine register |
|
873 |
||
874 |
// Do we Match on this edge index or not? Generally false for Control |
|
875 |
// and true for everything else. Weird for calls & returns. |
|
876 |
virtual uint match_edge(uint idx) const; |
|
877 |
||
878 |
// Register class output is returned in |
|
879 |
virtual const RegMask &out_RegMask() const; |
|
880 |
// Register class input is expected in |
|
881 |
virtual const RegMask &in_RegMask(uint) const; |
|
882 |
// Should we clone rather than spill this instruction? |
|
883 |
bool rematerialize() const; |
|
884 |
||
885 |
// Return JVM State Object if this Node carries debug info, or NULL otherwise |
|
886 |
virtual JVMState* jvms() const; |
|
887 |
||
888 |
// Print as assembly |
|
889 |
virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const; |
|
890 |
// Emit bytes starting at parameter 'ptr' |
|
891 |
// Bump 'ptr' by the number of output bytes |
|
892 |
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; |
|
893 |
// Size of instruction in bytes |
|
894 |
virtual uint size(PhaseRegAlloc *ra_) const; |
|
895 |
||
896 |
// Convenience function to extract an integer constant from a node. |
|
897 |
// If it is not an integer constant (either Con, CastII, or Mach), |
|
898 |
// return value_if_unknown. |
|
899 |
jint find_int_con(jint value_if_unknown) const { |
|
900 |
const TypeInt* t = find_int_type(); |
|
901 |
return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; |
|
902 |
} |
|
903 |
// Return the constant, knowing it is an integer constant already |
|
904 |
jint get_int() const { |
|
905 |
const TypeInt* t = find_int_type(); |
|
906 |
guarantee(t != NULL, "must be con"); |
|
907 |
return t->get_con(); |
|
908 |
} |
|
909 |
// Here's where the work is done. Can produce non-constant int types too. |
|
910 |
const TypeInt* find_int_type() const; |
|
911 |
||
912 |
// Same thing for long (and intptr_t, via type.hpp): |
|
913 |
jlong get_long() const { |
|
914 |
const TypeLong* t = find_long_type(); |
|
915 |
guarantee(t != NULL, "must be con"); |
|
916 |
return t->get_con(); |
|
917 |
} |
|
918 |
jlong find_long_con(jint value_if_unknown) const { |
|
919 |
const TypeLong* t = find_long_type(); |
|
920 |
return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; |
|
921 |
} |
|
922 |
const TypeLong* find_long_type() const; |
|
923 |
||
924 |
// These guys are called by code generated by ADLC: |
|
925 |
intptr_t get_ptr() const; |
|
926 |
jdouble getd() const; |
|
927 |
jfloat getf() const; |
|
928 |
||
929 |
// Nodes which are pinned into basic blocks |
|
930 |
virtual bool pinned() const { return false; } |
|
931 |
||
932 |
// Nodes which use memory without consuming it, hence need antidependences |
|
933 |
// More specifically, needs_anti_dependence_check returns true iff the node |
|
934 |
// (a) does a load, and (b) does not perform a store (except perhaps to a |
|
935 |
// stack slot or some other unaliased location). |
|
936 |
bool needs_anti_dependence_check() const; |
|
937 |
||
938 |
// Return which operand this instruction may cisc-spill. In other words, |
|
939 |
// return operand position that can convert from reg to memory access |
|
940 |
virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } |
|
941 |
bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } |
|
942 |
||
943 |
//----------------- Graph walking |
|
944 |
public: |
|
945 |
// Walk and apply member functions recursively. |
|
946 |
// Supplied (this) pointer is root. |
|
947 |
void walk(NFunc pre, NFunc post, void *env); |
|
948 |
static void nop(Node &, void*); // Dummy empty function |
|
949 |
static void packregion( Node &n, void* ); |
|
950 |
private: |
|
951 |
void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited); |
|
952 |
||
953 |
//----------------- Printing, etc |
|
954 |
public: |
|
955 |
#ifndef PRODUCT |
|
956 |
Node* find(int idx) const; // Search the graph for the given idx. |
|
957 |
Node* find_ctrl(int idx) const; // Search control ancestors for the given idx. |
|
958 |
void dump() const; // Print this node, |
|
959 |
void dump(int depth) const; // Print this node, recursively to depth d |
|
960 |
void dump_ctrl(int depth) const; // Print control nodes, to depth d |
|
961 |
virtual void dump_req() const; // Print required-edge info |
|
962 |
virtual void dump_prec() const; // Print precedence-edge info |
|
963 |
virtual void dump_out() const; // Print the output edge info |
|
964 |
virtual void dump_spec(outputStream *st) const {}; // Print per-node info |
|
965 |
void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges |
|
966 |
void verify() const; // Check Def-Use info for my subgraph |
|
967 |
static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space); |
|
968 |
||
969 |
// This call defines a class-unique string used to identify class instances |
|
970 |
virtual const char *Name() const; |
|
971 |
||
972 |
void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) |
|
973 |
// RegMask Print Functions |
|
974 |
void dump_in_regmask(int idx) { in_RegMask(idx).dump(); } |
|
975 |
void dump_out_regmask() { out_RegMask().dump(); } |
|
976 |
static int _in_dump_cnt; |
|
977 |
static bool in_dump() { return _in_dump_cnt > 0; } |
|
978 |
void fast_dump() const { |
|
979 |
tty->print("%4d: %-17s", _idx, Name()); |
|
980 |
for (uint i = 0; i < len(); i++) |
|
981 |
if (in(i)) |
|
982 |
tty->print(" %4d", in(i)->_idx); |
|
983 |
else |
|
984 |
tty->print(" NULL"); |
|
985 |
tty->print("\n"); |
|
986 |
} |
|
987 |
#endif |
|
988 |
#ifdef ASSERT |
|
989 |
void verify_construction(); |
|
990 |
bool verify_jvms(const JVMState* jvms) const; |
|
991 |
int _debug_idx; // Unique value assigned to every node. |
|
992 |
int debug_idx() const { return _debug_idx; } |
|
993 |
void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; } |
|
994 |
||
995 |
Node* _debug_orig; // Original version of this, if any. |
|
996 |
Node* debug_orig() const { return _debug_orig; } |
|
997 |
void set_debug_orig(Node* orig); // _debug_orig = orig |
|
998 |
||
999 |
int _hash_lock; // Barrier to modifications of nodes in the hash table |
|
1000 |
void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } |
|
1001 |
void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); } |
|
1002 |
||
1003 |
static void init_NodeProperty(); |
|
1004 |
||
1005 |
#if OPTO_DU_ITERATOR_ASSERT |
|
1006 |
const Node* _last_del; // The last deleted node. |
|
1007 |
uint _del_tick; // Bumped when a deletion happens.. |
|
1008 |
#endif |
|
1009 |
#endif |
|
1010 |
}; |
|
1011 |
||
1012 |
//----------------------------------------------------------------------------- |
|
1013 |
// Iterators over DU info, and associated Node functions. |
|
1014 |
||
1015 |
#if OPTO_DU_ITERATOR_ASSERT |
|
1016 |
||
1017 |
// Common code for assertion checking on DU iterators. |
|
1018 |
// Common code for assertion checking on DU (def-use) iterators.
// All state below exists only in debug builds; in product builds the
// class is empty and the I_VDUI_ONLY macro expands to nothing.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  // Verification helpers (debug builds only; bodies live outside this header).
  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};
|
1038 |
||
1039 |
#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x) |
|
1040 |
||
1041 |
// Default DU iterator. Allows appends onto the out array. |
|
1042 |
// Allows deletion from the out array only at the current point. |
|
1043 |
// Usage: |
|
1044 |
// for (DUIterator i = x->outs(); x->has_out(i); i++) { |
|
1045 |
// Node* y = x->out(i); |
|
1046 |
// ... |
|
1047 |
// } |
|
1048 |
// Compiles in product mode to a unsigned integer index, which indexes |
|
1049 |
// onto a repeatedly reloaded base pointer of x->_out. The loop predicate |
|
1050 |
// also reloads x->_outcnt. If you delete, you must perform "--i" just |
|
1051 |
// before continuing the loop. You must delete only the last-produced |
|
1052 |
// edge. You must delete only a single copy of the last-produced edge, |
|
1053 |
// or else you must delete all copies at once (the first time the edge |
|
1054 |
// is produced by the iterator). |
|
1055 |
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

#ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
#endif

  // Private: only Node::outs() may construct a live iterator.
  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                           debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/           debug_only(_vdui = false); }

  // Postfix ++ only; advances the index and (debug) checks the step.
  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                             VDUI_ONLY(verify_increment()); }

  // Back up over a just-deleted edge; debug check runs BEFORE the decrement.
  void operator--()
    { VDUI_ONLY(verify_resync());         --_idx; }

  // Destructor verifies (debug only) that the iteration loop ended cleanly.
  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                   debug_only(reset(that)); }
};
|
1096 |
||
1097 |
// Inline Node accessors for the default DU iterator (declared in class Node).
// outs() yields the begin iterator; has_out() is the loop predicate, which
// reloads _outcnt each time so appends during iteration are seen.
DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
// out() reloads the _out base pointer on every call; in debug builds it also
// records the produced value in i._last for later verification.
Node* Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }
|
1105 |
||
1106 |
||
1107 |
// Faster DU iterator. Disallows insertions into the out array. |
|
1108 |
// Allows deletion from the out array only at the current point. |
|
1109 |
// Usage: |
|
1110 |
// for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { |
|
1111 |
// Node* y = x->fast_out(i); |
|
1112 |
// ... |
|
1113 |
// } |
|
1114 |
// Compiles in product mode to raw Node** pointer arithmetic, with |
|
1115 |
// no reloading of pointers from the original node x. If you delete, |
|
1116 |
// you must perform "--i; --imax" just before continuing the loop. |
|
1117 |
// If you delete multiple copies of the same edge, you must decrement |
|
1118 |
// imax, but not i, multiple times: "--i, imax -= num_edges". |
|
1119 |
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

#ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
#endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;        debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/           debug_only(_vdui = false); }

  // Postfix ++ only: step to the next out-edge slot.
  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                            VDUI_ONLY(verify(_node, true)); }

  // Back up over a just-deleted edge (debug check runs before the move).
  void operator--()
    { VDUI_ONLY(verify_resync());         --_outp; }

  // Shrink the limit after deleting n copies of an edge.
  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                         VDUI_ONLY(verify_relimit(n)); }

  // Loop predicate: compare the cursor against the limit iterator.
  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;                 debug_only(reset(that)); }
};
|
1164 |
||
1165 |
// Produce a (base, limit) DUIterator_Fast pair over this node's out-edges.
// The limit is written into the reference argument; the base is returned.
DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
// Dereference the fast iterator; debug builds also record the produced
// value in i._last for later verification.
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
|
1175 |
||
1176 |
||
1177 |
// Faster DU iterator. Requires each successive edge to be removed. |
|
1178 |
// Does not allow insertion of any edges. |
|
1179 |
// Usage: |
|
1180 |
// for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { |
|
1181 |
// Node* y = x->last_out(i); |
|
1182 |
// ... |
|
1183 |
// } |
|
1184 |
// Compiles in product mode to raw Node** pointer arithmetic, with |
|
1185 |
// no reloading of pointers from the original node x. |
|
1186 |
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

#ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
#endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  // Declared private and empty so accidental forward iteration or
  // '<' comparison is a compile error; this iterator only walks backward.
  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int) {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  // Step backward over one just-removed edge.
  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1)); }

  // Step backward over n just-removed copies of the same edge.
  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n)); }

  // Loop predicate: still at or above the minimum iterator.
  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};
|
1221 |
||
1222 |
// Produce a (last, min) DUIterator_Last pair over this node's out-edges.
// Iteration runs from the last edge down to index 0, removing edges as it goes.
DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
// Dereference; debug builds also record the produced value in i._last.
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
|
1232 |
||
1233 |
#endif //OPTO_DU_ITERATOR_ASSERT |
|
1234 |
||
1235 |
#undef I_VDUI_ONLY |
|
1236 |
#undef VDUI_ONLY |
|
1237 |
||
1238 |
||
1239 |
//----------------------------------------------------------------------------- |
|
1240 |
// Map dense integer indices to Nodes. Uses classic doubling-array trick. |
|
1241 |
// Abstractly provides an infinite array of Node*'s, initialized to NULL. |
|
1242 |
// Note that the constructor just zeros things, and since I use Arena |
|
1243 |
// allocation I do not need a destructor to reclaim storage. |
|
1244 |
class Node_Array : public ResourceObj {
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;                  // allocated length of _nodes
  Node **_nodes;                // backing store; unmapped entries read as NULL
  void   grow( uint i );        // Grow array node to fit
public:
  // Allocate the initial table and clear it so operator[] returns NULL
  // for every index that was never map()'d.
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  // Shallow copy: shares the arena AND the backing store of 'na';
  // mutations through either object are visible in both.
  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  // Checked lookup: asserts the index is within the allocated table.
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.  Grows on demand.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};
|
1273 |
||
1274 |
class Node_List : public Node_Array { |
|
1275 |
uint _cnt; |
|
1276 |
public: |
|
1277 |
Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {} |
|
1278 |
Node_List(Arena *a) : Node_Array(a), _cnt(0) {} |
|
1279 |
void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; } |
|
1280 |
void remove( uint i ) { Node_Array::remove(i); _cnt--; } |
|
1281 |
void push( Node *b ) { map(_cnt++,b); } |
|
1282 |
void yank( Node *n ); // Find and remove |
|
1283 |
Node *pop() { return _nodes[--_cnt]; } |
|
1284 |
Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;} |
|
1285 |
void clear() { _cnt = 0; Node_Array::clear(); } // retain storage |
|
1286 |
uint size() const { return _cnt; } |
|
1287 |
void dump() const; |
|
1288 |
}; |
|
1289 |
||
1290 |
//------------------------------Unique_Node_List------------------------------- |
|
1291 |
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;       // membership bits, indexed by Node::_idx
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  // Membership test is O(1) via the bit set, not a list scan.
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  // Push only if not already present; test_set both queries and records
  // membership in one step.
  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  // Pop in round-robin ("clock") order: take the element at _clock_index
  // and move the tail element into its slot.
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index++, Node_List::pop());
    _in_worklist >>= b->_idx;   // drop b from the membership set
    return b;
  }
  // Remove entry i by moving the tail element into its slot.
  // NOTE: unlike Node_List::remove(uint), this does NOT preserve order.
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;   // drop b from the membership set
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
|
1333 |
||
1334 |
// Inline definition of Compile::record_for_igvn must be deferred to this point. |
|
1335 |
inline void Compile::record_for_igvn(Node* n) {
  // Queue n on the compile-wide _for_igvn worklist; since the worklist is
  // duplicate-filtering (see Unique_Node_List::push), repeated records of
  // the same node are harmless.
  _for_igvn->push(n);
}
|
1338 |
||
1339 |
//------------------------------Node_Stack------------------------------------- |
|
1340 |
class Node_Stack { |
|
1341 |
protected: |
|
1342 |
struct INode { |
|
1343 |
Node *node; // Processed node |
|
1344 |
uint indx; // Index of next node's child |
|
1345 |
}; |
|
1346 |
INode *_inode_top; // tos, stack grows up |
|
1347 |
INode *_inode_max; // End of _inodes == _inodes + _max |
|
1348 |
INode *_inodes; // Array storage for the stack |
|
1349 |
Arena *_a; // Arena to allocate in |
|
1350 |
void grow(); |
|
1351 |
public: |
|
1352 |
Node_Stack(int size) { |
|
1353 |
size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; |
|
1354 |
_a = Thread::current()->resource_area(); |
|
1355 |
_inodes = NEW_ARENA_ARRAY( _a, INode, max ); |
|
1356 |
_inode_max = _inodes + max; |
|
1357 |
_inode_top = _inodes - 1; // stack is empty |
|
1358 |
} |
|
1359 |
||
1360 |
Node_Stack(Arena *a, int size) : _a(a) { |
|
1361 |
size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; |
|
1362 |
_inodes = NEW_ARENA_ARRAY( _a, INode, max ); |
|
1363 |
_inode_max = _inodes + max; |
|
1364 |
_inode_top = _inodes - 1; // stack is empty |
|
1365 |
} |
|
1366 |
||
1367 |
void pop() { |
|
1368 |
assert(_inode_top >= _inodes, "node stack underflow"); |
|
1369 |
--_inode_top; |
|
1370 |
} |
|
1371 |
void push(Node *n, uint i) { |
|
1372 |
++_inode_top; |
|
1373 |
if (_inode_top >= _inode_max) grow(); |
|
1374 |
INode *top = _inode_top; // optimization |
|
1375 |
top->node = n; |
|
1376 |
top->indx = i; |
|
1377 |
} |
|
1378 |
Node *node() const { |
|
1379 |
return _inode_top->node; |
|
1380 |
} |
|
1381 |
Node* node_at(uint i) const { |
|
1382 |
assert(_inodes + i <= _inode_top, "in range"); |
|
1383 |
return _inodes[i].node; |
|
1384 |
} |
|
1385 |
uint index() const { |
|
1386 |
return _inode_top->indx; |
|
1387 |
} |
|
1388 |
void set_node(Node *n) { |
|
1389 |
_inode_top->node = n; |
|
1390 |
} |
|
1391 |
void set_index(uint i) { |
|
1392 |
_inode_top->indx = i; |
|
1393 |
} |
|
1394 |
uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size |
|
213 | 1395 |
uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size |
1 | 1396 |
bool is_nonempty() const { return (_inode_top >= _inodes); } |
1397 |
bool is_empty() const { return (_inode_top < _inodes); } |
|
1398 |
void clear() { _inode_top = _inodes - 1; } // retain storage |
|
1399 |
}; |
|
1400 |
||
1401 |
||
1402 |
//-----------------------------Node_Notes-------------------------------------- |
|
1403 |
// Debugging or profiling annotations loosely and sparsely associated |
|
1404 |
// with some nodes. See Compile::node_notes_at for the accessor. |
|
1405 |
class Node_Notes VALUE_OBJ_CLASS_SPEC { |
|
1406 |
JVMState* _jvms; |
|
1407 |
||
1408 |
public: |
|
1409 |
Node_Notes(JVMState* jvms = NULL) { |
|
1410 |
_jvms = jvms; |
|
1411 |
} |
|
1412 |
||
1413 |
JVMState* jvms() { return _jvms; } |
|
1414 |
void set_jvms(JVMState* x) { _jvms = x; } |
|
1415 |
||
1416 |
// True if there is nothing here. |
|
1417 |
bool is_clear() { |
|
1418 |
return (_jvms == NULL); |
|
1419 |
} |
|
1420 |
||
1421 |
// Make there be nothing here. |
|
1422 |
void clear() { |
|
1423 |
_jvms = NULL; |
|
1424 |
} |
|
1425 |
||
1426 |
// Make a new, clean node notes. |
|
1427 |
static Node_Notes* make(Compile* C) { |
|
1428 |
Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); |
|
1429 |
nn->clear(); |
|
1430 |
return nn; |
|
1431 |
} |
|
1432 |
||
1433 |
Node_Notes* clone(Compile* C) { |
|
1434 |
Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); |
|
1435 |
(*nn) = (*this); |
|
1436 |
return nn; |
|
1437 |
} |
|
1438 |
||
1439 |
// Absorb any information from source. |
|
1440 |
bool update_from(Node_Notes* source) { |
|
1441 |
bool changed = false; |
|
1442 |
if (source != NULL) { |
|
1443 |
if (source->jvms() != NULL) { |
|
1444 |
set_jvms(source->jvms()); |
|
1445 |
changed = true; |
|
1446 |
} |
|
1447 |
} |
|
1448 |
return changed; |
|
1449 |
} |
|
1450 |
}; |
|
1451 |
||
1452 |
// Inlined accessors for Compile::node_nodes that require the preceding class: |
|
1453 |
// Locate the Node_Notes slot for node index 'idx' inside the two-level
// array 'arr'.  (Every element of arr is a sub-array of length
// _node_notes_block_size.)  If the outer array is too short, either grow
// it (when can_grow) or give up and return NULL.
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  const int block   = (idx >> _log2_node_notes_block_size);
  const int cur_len = (arr == NULL) ? 0 : arr->length();
  if (block >= cur_len) {
    if (!can_grow)  return NULL;
    // Add enough sub-arrays to make 'block' a valid index.
    grow_node_notes(arr, block - cur_len + 1);
  }
  return arr->at(block) + (idx & (_node_notes_block_size - 1));
}
|
1466 |
||
1467 |
inline bool |
|
1468 |
Compile::set_node_notes_at(int idx, Node_Notes* value) { |
|
1469 |
if (value == NULL || value->is_clear()) |
|
1470 |
return false; // nothing to write => write nothing |
|
1471 |
Node_Notes* loc = locate_node_notes(_node_note_array, idx, true); |
|
1472 |
assert(loc != NULL, ""); |
|
1473 |
return loc->update_from(value); |
|
1474 |
} |
|
1475 |
||
1476 |
||
1477 |
//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;              // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const;           // Size is bigger
  // Immutable to clients; mutated in place only via set_type() below.
  const Type* const _type;
public:
  // Replace this node's type in place.  In debug builds, verifies that
  // changing the type does not change the node's hash code, so a node
  // already keyed in the hash table does not silently need a rehash.
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  // The type constant carried by this node (never NULL once constructed).
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};