1
|
1 |
/*
|
|
2 |
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
|
20 |
* CA 95054 USA or visit www.sun.com if you need additional information or
|
|
21 |
* have any questions.
|
|
22 |
*
|
|
23 |
*/
|
|
24 |
|
|
25 |
// Portions of code courtesy of Clifford Click
|
|
26 |
|
|
27 |
class MultiNode;
|
|
28 |
class PhaseCCP;
|
|
29 |
class PhaseTransform;
|
|
30 |
|
|
31 |
//------------------------------MemNode----------------------------------------
|
|
32 |
// Load or Store, possibly throwing a NULL pointer exception
|
|
33 |
class MemNode : public Node {
|
|
34 |
protected:
|
|
35 |
#ifdef ASSERT
|
|
36 |
const TypePtr* _adr_type; // What kind of memory is being addressed?
|
|
37 |
#endif
|
|
38 |
virtual uint size_of() const; // Size is bigger (ASSERT only)
|
|
39 |
public:
|
|
40 |
enum { Control, // When is it safe to do this load?
|
|
41 |
Memory, // Chunk of memory is being loaded from
|
|
42 |
Address, // Actually address, derived from base
|
|
43 |
ValueIn, // Value to store
|
|
44 |
OopStore // Preceeding oop store, only in StoreCM
|
|
45 |
};
|
|
46 |
protected:
|
|
47 |
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
|
|
48 |
: Node(c0,c1,c2 ) {
|
|
49 |
init_class_id(Class_Mem);
|
|
50 |
debug_only(_adr_type=at; adr_type();)
|
|
51 |
}
|
|
52 |
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
|
|
53 |
: Node(c0,c1,c2,c3) {
|
|
54 |
init_class_id(Class_Mem);
|
|
55 |
debug_only(_adr_type=at; adr_type();)
|
|
56 |
}
|
|
57 |
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
|
|
58 |
: Node(c0,c1,c2,c3,c4) {
|
|
59 |
init_class_id(Class_Mem);
|
|
60 |
debug_only(_adr_type=at; adr_type();)
|
|
61 |
}
|
|
62 |
|
|
63 |
// Helpers for the optimizer. Documented in memnode.cpp.
|
|
64 |
static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
|
|
65 |
Node* p2, AllocateNode* a2,
|
|
66 |
PhaseTransform* phase);
|
|
67 |
static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
|
|
68 |
|
|
69 |
public:
|
|
70 |
// This one should probably be a phase-specific function:
|
|
71 |
static bool detect_dominating_control(Node* dom, Node* sub);
|
|
72 |
|
|
73 |
// Is this Node a MemNode or some descendent? Default is YES.
|
|
74 |
virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
|
|
75 |
|
|
76 |
virtual const class TypePtr *adr_type() const; // returns bottom_type of address
|
|
77 |
|
|
78 |
// Shared code for Ideal methods:
|
|
79 |
Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL.
|
|
80 |
|
|
81 |
// Helper function for adr_type() implementations.
|
|
82 |
static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
|
|
83 |
|
|
84 |
// Raw access function, to allow copying of adr_type efficiently in
|
|
85 |
// product builds and retain the debug info for debug builds.
|
|
86 |
const TypePtr *raw_adr_type() const {
|
|
87 |
#ifdef ASSERT
|
|
88 |
return _adr_type;
|
|
89 |
#else
|
|
90 |
return 0;
|
|
91 |
#endif
|
|
92 |
}
|
|
93 |
|
|
94 |
// Map a load or store opcode to its corresponding store opcode.
|
|
95 |
// (Return -1 if unknown.)
|
|
96 |
virtual int store_Opcode() const { return -1; }
|
|
97 |
|
|
98 |
// What is the type of the value in memory? (T_VOID mean "unspecified".)
|
|
99 |
virtual BasicType memory_type() const = 0;
|
|
100 |
virtual int memory_size() const { return type2aelembytes[memory_type()]; }
|
|
101 |
|
|
102 |
// Search through memory states which precede this node (load or store).
|
|
103 |
// Look for an exact match for the address, with no intervening
|
|
104 |
// aliased stores.
|
|
105 |
Node* find_previous_store(PhaseTransform* phase);
|
|
106 |
|
|
107 |
// Can this node (load or store) accurately see a stored value in
|
|
108 |
// the given memory state? (The state may or may not be in(Memory).)
|
|
109 |
Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
|
|
110 |
|
|
111 |
#ifndef PRODUCT
|
|
112 |
static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
|
|
113 |
virtual void dump_spec(outputStream *st) const;
|
|
114 |
#endif
|
|
115 |
};
|
|
116 |
|
|
117 |
//------------------------------LoadNode---------------------------------------
|
|
118 |
// Load value; requires Memory and Address
|
|
119 |
// Common base for all loads: requires a Memory and an Address input and
// carries the Type of the value produced (_type).
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method: picks the concrete LoadXNode for 'bt'.
  static LoadNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};
|
|
173 |
|
|
174 |
//------------------------------LoadBNode--------------------------------------
|
|
175 |
// Load a byte (8bits signed) from memory
|
|
176 |
// Loads a signed 8-bit byte; result widened into an int register.
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};
|
|
186 |
|
|
187 |
//------------------------------LoadCNode--------------------------------------
|
|
188 |
// Load a char (16bits unsigned) from memory
|
|
189 |
// Loads an unsigned 16-bit char; result widened into an int register.
class LoadCNode : public LoadNode {
public:
  LoadCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};
|
|
199 |
|
|
200 |
//------------------------------LoadINode--------------------------------------
|
|
201 |
// Load an integer from memory
|
|
202 |
// Loads a 32-bit integer.
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};
|
|
211 |
|
|
212 |
//------------------------------LoadRangeNode----------------------------------
|
|
213 |
// Load an array length from the array
|
|
214 |
// Loads an array length; address type is fixed to TypeAryPtr::RANGE and the
// default result type is non-negative (TypeInt::POS).
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
};
|
|
222 |
|
|
223 |
//------------------------------LoadLNode--------------------------------------
|
|
224 |
// Load a long from memory
|
|
225 |
// Loads a 64-bit long.  May be flagged as requiring atomic access, in which
// case a piecewise (two 32-bit halves) load is forbidden.
class LoadLNode : public LoadNode {
  // _require_atomic_access participates in hash()/cmp() so that atomic and
  // non-atomic loads are never treated as identical nodes.
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  // Factory for a load with require_atomic_access forced on.
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
|
|
254 |
|
|
255 |
//------------------------------LoadL_unalignedNode----------------------------
|
|
256 |
// Load a long from unaligned memory
|
|
257 |
// Loads a long from a possibly unaligned address; differs from LoadLNode
// only in its Opcode.
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};
|
|
263 |
|
|
264 |
//------------------------------LoadFNode--------------------------------------
|
|
265 |
// Load a float (32 bits) from memory
|
|
266 |
// Loads a 32-bit float.
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};
|
|
275 |
|
|
276 |
//------------------------------LoadDNode--------------------------------------
|
|
277 |
// Load a double (64 bits) from memory
|
|
278 |
// Loads a 64-bit double.
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};
|
|
287 |
|
|
288 |
//------------------------------LoadD_unalignedNode----------------------------
|
|
289 |
// Load a double from unaligned memory
|
|
290 |
// Loads a double from a possibly unaligned address; differs from LoadDNode
// only in its Opcode.
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};
|
|
296 |
|
|
297 |
//------------------------------LoadPNode--------------------------------------
|
|
298 |
// Load a pointer from memory (either object or array)
|
|
299 |
// Loads a pointer (object or array reference).
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};
|
|
318 |
|
|
319 |
//------------------------------LoadKlassNode----------------------------------
|
|
320 |
// Load a Klass from an object
|
|
321 |
// Loads a Klass pointer from an object header; overrides Value/Identity to
// fold the load when the klass is statically known.
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk = TypeKlassPtr::OBJECT )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  // Unlike LoadPNode, a klass load always depends only on its test.
  virtual bool depends_only_on_test() const { return true; }
};
|
|
330 |
|
|
331 |
//------------------------------LoadSNode--------------------------------------
|
|
332 |
// Load a short (16bits signed) from memory
|
|
333 |
// Loads a signed 16-bit short; stored back with the char/short store (Op_StoreC).
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};
|
|
343 |
|
|
344 |
//------------------------------StoreNode--------------------------------------
|
|
345 |
// Store value; requires Memory, Address and Value
|
|
346 |
// Common base for all stores: takes control, memory, address, value
// (and, for StoreCM, a preceding oop store) inputs.
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  // Stores can never be hoisted on the strength of a dominating test alone.
  virtual bool depends_only_on_test() const { return false; }

  // Ideal-transform helpers: strip redundant masking / sign-extension of
  // the stored value when the store only keeps the low bits anyway.
  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method: picks the concrete StoreXNode for 'bt'.
  static StoreNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
|
|
391 |
|
|
392 |
//------------------------------StoreBNode-------------------------------------
|
|
393 |
// Store byte to memory
|
|
394 |
// Stores a byte.
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};
|
|
401 |
|
|
402 |
//------------------------------StoreCNode-------------------------------------
|
|
403 |
// Store char/short to memory
|
|
404 |
// Stores a char/short (16 bits).
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};
|
|
411 |
|
|
412 |
//------------------------------StoreINode-------------------------------------
|
|
413 |
// Store int to memory
|
|
414 |
// Stores a 32-bit int.
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};
|
|
420 |
|
|
421 |
//------------------------------StoreLNode-------------------------------------
|
|
422 |
// Store long to memory
|
|
423 |
// Stores a 64-bit long.  May be flagged as requiring atomic access, in which
// case a piecewise (two 32-bit halves) store is forbidden.
class StoreLNode : public StoreNode {
  // _require_atomic_access participates in hash()/cmp() so that atomic and
  // non-atomic stores are never treated as identical nodes.
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  // Factory for a store with require_atomic_access forced on.
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
|
|
449 |
|
|
450 |
//------------------------------StoreFNode-------------------------------------
|
|
451 |
// Store float to memory
|
|
452 |
// Stores a 32-bit float.
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};
|
|
458 |
|
|
459 |
//------------------------------StoreDNode-------------------------------------
|
|
460 |
// Store double to memory
|
|
461 |
// Stores a 64-bit double.
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};
|
|
467 |
|
|
468 |
//------------------------------StorePNode-------------------------------------
|
|
469 |
// Store pointer to memory
|
|
470 |
// Stores a pointer.
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};
|
|
476 |
|
|
477 |
//------------------------------StoreCMNode-----------------------------------
|
|
478 |
// Store card-mark byte to memory for CM
|
|
479 |
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
|
|
480 |
// Preceding equivalent StoreCMs may be eliminated.
|
|
481 |
// Stores a card-mark byte; carries an extra OopStore input edge referring to
// the oop store it covers (see MemNode::OopStore).
class StoreCMNode : public StoreNode {
public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store ) : StoreNode(c,mem,adr,at,val,oop_store) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; }  // unspecific
};
|
|
489 |
|
|
490 |
//------------------------------LoadPLockedNode---------------------------------
|
|
491 |
// Load-locked a pointer from memory (either object or array).
|
|
492 |
// On Sparc & Intel this is implemented as a normal pointer load.
|
|
493 |
// On PowerPC and friends it's a real load-locked.
|
|
494 |
// Load-locked pointer; pairs with StorePConditional.  Address type and
// result type are both fixed to raw-pointer bottom.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};
|
|
502 |
|
|
503 |
//------------------------------LoadLLockedNode---------------------------------
|
|
504 |
// Load-locked a long from memory.
|
|
505 |
// On Sparc & Intel this is implemented as a normal long load.
|
|
506 |
// Load-locked long; pairs with StoreLConditional.
class LoadLLockedNode : public LoadLNode {
public:
  LoadLLockedNode( Node *c, Node *mem, Node *adr )
    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StoreLConditional; }
};
|
|
513 |
|
|
514 |
//------------------------------SCMemProjNode---------------------------------------
|
|
515 |
// This class defines a projection of the memory state of a store conditional node.
|
|
516 |
// These nodes return a value, but also update memory.
|
|
517 |
// Projection of the memory state produced by a store-conditional node
// (which returns a value AND updates memory).
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};  // sentinel projection constant
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  // Address type is taken from the producing node's Memory input.
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;}  // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};
|
|
531 |
|
|
532 |
//------------------------------LoadStoreNode---------------------------
|
|
533 |
// Base for combined load-store operations (conditional stores,
// compare-and-swap).  Produces a boolean success value.
class LoadStoreNode : public Node {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1  // One more input than MemNode
  };
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual uint ideal_reg() const { return Op_RegI; }
  // Match only the address and value edges, not memory.
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
};
|
|
544 |
|
|
545 |
//------------------------------StorePConditionalNode---------------------------
|
|
546 |
// Conditionally store pointer to memory, if no change since prior
|
|
547 |
// load-locked. Sets flags for success or failure of the store.
|
|
548 |
// Conditionally stores a pointer if memory is unchanged since the prior
// load-locked; sets condition flags with the outcome.
class StorePConditionalNode : public LoadStoreNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};
|
|
555 |
|
|
556 |
//------------------------------StoreLConditionalNode---------------------------
|
|
557 |
// Conditionally store long to memory, if no change since prior
|
|
558 |
// load-locked. Sets flags for success or failure of the store.
|
|
559 |
// Conditionally stores a long if memory is unchanged since the prior
// load-locked; sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
};
|
|
564 |
|
|
565 |
|
|
566 |
//------------------------------CompareAndSwapLNode---------------------------
|
|
567 |
// Atomic compare-and-swap of a long; 'ex' is the expected value.
class CompareAndSwapLNode : public LoadStoreNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
|
|
572 |
|
|
573 |
|
|
574 |
//------------------------------CompareAndSwapINode---------------------------
|
|
575 |
// Atomic compare-and-swap of an int; 'ex' is the expected value.
class CompareAndSwapINode : public LoadStoreNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
|
|
580 |
|
|
581 |
|
|
582 |
//------------------------------CompareAndSwapPNode---------------------------
|
|
583 |
// Atomic compare-and-swap of a pointer; 'ex' is the expected value.
class CompareAndSwapPNode : public LoadStoreNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
|
|
588 |
|
|
589 |
//------------------------------ClearArray-------------------------------------
|
|
590 |
// Zeroes a run of memory words.  Inputs: control, array memory, word count,
// and the base address of the area to clear.
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  // Three overloads cover every mix of compile-time-constant and
  // run-time-computed offsets:
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
};
|
|
619 |
|
|
620 |
//------------------------------StrComp-------------------------------------
|
|
621 |
// Intrinsic string comparison.  Takes the memory slices the comparison may
// read (char array, value, count, offset) plus the two string operands, and
// produces an int result.
class StrCompNode: public Node {
public:
  StrCompNode(Node *control,
              Node* char_array_mem,
              Node* value_mem,
              Node* count_mem,
              Node* offset_mem,
              Node* s1, Node* s2): Node(control,
                                        char_array_mem,
                                        value_mem,
                                        count_mem,
                                        offset_mem,
                                        s1, s2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  // a StrCompNode (conservatively) aliases with everything:
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
|
|
643 |
|
|
644 |
//------------------------------MemBar-----------------------------------------
|
|
645 |
// There are different flavors of Memory Barriers to match the Java Memory
|
|
646 |
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
|
|
647 |
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
|
|
648 |
// volatile-load. Monitor-exit and volatile-store act as Release: no
|
|
649 |
// preceding ref can be moved to after them.  We insert a MemBar-Release
|
|
650 |
// before a FastUnlock or volatile-store. All volatiles need to be
|
|
651 |
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
|
|
652 |
// seperate it from any following volatile-load.
|
|
653 |
class MemBarNode: public MultiNode {
|
|
654 |
virtual uint hash() const ; // { return NO_HASH; }
|
|
655 |
virtual uint cmp( const Node &n ) const ; // Always fail, except on self
|
|
656 |
|
|
657 |
virtual uint size_of() const { return sizeof(*this); }
|
|
658 |
// Memory type this node is serializing. Usually either rawptr or bottom.
|
|
659 |
const TypePtr* _adr_type;
|
|
660 |
|
|
661 |
public:
|
|
662 |
enum {
|
|
663 |
Precedent = TypeFunc::Parms // optional edge to force precedence
|
|
664 |
};
|
|
665 |
MemBarNode(Compile* C, int alias_idx, Node* precedent);
|
|
666 |
virtual int Opcode() const = 0;
|
|
667 |
virtual const class TypePtr *adr_type() const { return _adr_type; }
|
|
668 |
virtual const Type *Value( PhaseTransform *phase ) const;
|
|
669 |
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
|
|
670 |
virtual uint match_edge(uint idx) const { return 0; }
|
|
671 |
virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
|
|
672 |
virtual Node *match( const ProjNode *proj, const Matcher *m );
|
|
673 |
// Factory method. Builds a wide or narrow membar.
|
|
674 |
// Optional 'precedent' becomes an extra edge if not null.
|
|
675 |
static MemBarNode* make(Compile* C, int opcode,
|
|
676 |
int alias_idx = Compile::AliasIdxBot,
|
|
677 |
Node* precedent = NULL);
|
|
678 |
};
|
|
679 |
|
|
680 |
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load or FastLock.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
|
|
689 |
|
|
690 |
// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store or FastUnLock.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
|
|
699 |
|
|
700 |
// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
|
|
708 |
|
|
709 |
// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};
|
|
719 |
|
|
720 |
// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  // Completion flag; queried by is_complete(), set by set_complete().
  bool _is_complete;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
|
|
805 |
|
|
806 |
//------------------------------MergeMem---------------------------------------
// A sparse collection of memory slices, indexed by alias index.
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  // An "empty" slice is the top sentinel (see empty_memory above).
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
|
|
851 |
|
|
852 |
// Iterator over the memory slices of one MergeMem, or over two MergeMems
// in parallel (the second contributing only its non-empty slices).
// See the "expected usages" comment below for the canonical loop shapes.
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;       // the (primary) merge being iterated
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;      // current alias index (slice cursor)
  int                 _cnt;      // _mm->req(), the iteration bound
  Node*               _mem;      // current slice of _mm (NULL before first next())
  Node*               _mem2;     // current slice of _mm2, when present
  int                 _cnt2;     // _mm2->req() (debug sentinel 999 if no _mm2)

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  // Recompute what _mem should be, straight from _mm (debug cross-check).
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  // Same cross-check for the second merge.
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  // Verify the cursor still agrees with the underlying MergeMem.
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);  // poison value: _cnt2 unused in 1-merge mode
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  // Is the current slice of _mm the top sentinel (i.e. not a real slice)?
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  // Write 'mem' back into the current slice of _mm and keep the cursor synced.
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        // Past the end of _mm2, pretend its slice is the top sentinel.
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};
|
|
1041 |
|
|
1042 |
//------------------------------Prefetch---------------------------------------
|
|
1043 |
|
|
1044 |
// Non-faulting prefetch load. Prefetch for many reads.
|
|
1045 |
class PrefetchReadNode : public Node {
|
|
1046 |
public:
|
|
1047 |
PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
|
|
1048 |
virtual int Opcode() const;
|
|
1049 |
virtual uint ideal_reg() const { return NotAMachineReg; }
|
|
1050 |
virtual uint match_edge(uint idx) const { return idx==2; }
|
|
1051 |
virtual const Type *bottom_type() const { return Type::ABIO; }
|
|
1052 |
};
|
|
1053 |
|
|
1054 |
// Non-faulting prefetch load. Prefetch for many reads & many writes.
|
|
1055 |
class PrefetchWriteNode : public Node {
|
|
1056 |
public:
|
|
1057 |
PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
|
|
1058 |
virtual int Opcode() const;
|
|
1059 |
virtual uint ideal_reg() const { return NotAMachineReg; }
|
|
1060 |
virtual uint match_edge(uint idx) const { return idx==2; }
|
|
1061 |
virtual const Type *bottom_type() const { return Type::ABIO; }
|
|
1062 |
};
|