/*
 * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_ifg.cpp.incl"

#define EXACT_PRESSURE 1
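// When EXACT_PRESSURE is defined, the per-block pressure fields record the
// true peak register pressure observed; otherwise a block that exceeds
// INTPRESSURE or FLOATPRESSURE is only marked as one past the threshold.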

//=============================================================================
//------------------------------IFG--------------------------------------------
PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
}

//------------------------------init-------------------------------------------
void PhaseIFG::init( uint maxlrg ) {
  _maxlrg = maxlrg;
  _yanked = new (_arena) VectorSet(_arena);
  _is_square = false;
  // Make uninitialized adjacency lists
  _adjs = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*maxlrg);
  // Also make empty live range structures
  _lrgs = (LRG *)_arena->Amalloc( maxlrg * sizeof(LRG) );
  memset(_lrgs,0,sizeof(LRG)*maxlrg);
  // Init all to empty
  for( uint i = 0; i < maxlrg; i++ ) {
    _adjs[i].initialize(maxlrg);
    _lrgs[i].Set_All();
  }
}

//------------------------------add--------------------------------------------
// Add edge between vertices a & b. These are sorted (triangular matrix),
// then the smaller number is inserted in the larger numbered array.
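// For example, an edge between live ranges 3 and 7 is recorded by inserting
// 3 into _adjs[7].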
int PhaseIFG::add_edge( uint a, uint b ) {
  lrgs(a).invalid_degree();
  lrgs(b).invalid_degree();
  // Sort a and b, so that a is bigger
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].insert( b );
}

//------------------------------add_vector-------------------------------------
// Add an edge between 'a' and everything in the vector.
void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
  // IFG is triangular, so do the inserts where 'a' < 'b'.
  assert( !_is_square, "only on triangular" );
  IndexSet *adjs_a = &_adjs[a];
  if( !vec->count() ) return;

  IndexSetIterator elements(vec);
  uint neighbor;
  while ((neighbor = elements.next()) != 0) {
    add_edge( a, neighbor );
  }
}

//------------------------------test-------------------------------------------
// Is there an edge between a and b?
int PhaseIFG::test_edge( uint a, uint b ) const {
  // Sort a and b, so that a is larger
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].member(b);
}

//------------------------------SquareUp---------------------------------------
// Convert triangular matrix to square matrix
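// After this, every edge (a,b) appears in both _adjs[a] and _adjs[b], so
// neighbors() is complete for any live range.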
void PhaseIFG::SquareUp() {
  assert( !_is_square, "only on triangular" );

  // Simple transpose
  for( uint i = 0; i < _maxlrg; i++ ) {
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      _adjs[datum].insert( i );
    }
  }
  _is_square = true;
}

//------------------------------Compute_Effective_Degree-----------------------
// Compute effective degree in bulk
void PhaseIFG::Compute_Effective_Degree() {
  assert( _is_square, "only on square" );

  for( uint i = 0; i < _maxlrg; i++ )
    lrgs(i).set_degree(effective_degree(i));
}

//------------------------------test_edge_sq-----------------------------------
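// Is there an edge between a and b? (square-matrix form)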
int PhaseIFG::test_edge_sq( uint a, uint b ) const {
  assert( _is_square, "only on square" );
  // Swap, so that 'a' has the lesser count. Then binary search is on
  // the smaller of a's list and b's list.
  if( neighbor_cnt(a) > neighbor_cnt(b) ) { uint tmp = a; a = b; b = tmp; }
  //return _adjs[a].unordered_member(b);
  return _adjs[a].member(b);
}

//------------------------------Union------------------------------------------
// Union edges of B into A
void PhaseIFG::Union( uint a, uint b ) {
  assert( _is_square, "only on square" );
  IndexSet *A = &_adjs[a];
  IndexSetIterator b_elements(&_adjs[b]);
  uint datum;
  while ((datum = b_elements.next()) != 0) {
    if(A->insert(datum)) {
      _adjs[datum].insert(a);
      lrgs(a).invalid_degree();
      lrgs(datum).invalid_degree();
    }
  }
}

//------------------------------remove_node------------------------------------
// Yank a Node and all connected edges from the IFG. Return a
// list of neighbors (edges) yanked.
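// The adjacency list of 'a' itself is left untouched, so the yanked edges
// can be returned here and later restored by re_insert().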
IndexSet *PhaseIFG::remove_node( uint a ) {
  assert( _is_square, "only on square" );
  assert( !_yanked->test(a), "" );
  _yanked->set(a);

  // I remove the LRG from all neighbors.
  IndexSetIterator elements(&_adjs[a]);
  LRG &lrg_a = lrgs(a);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].remove(a);
    lrgs(datum).inc_degree( -lrg_a.compute_degree(lrgs(datum)) );
  }
  return neighbors(a);
}

//------------------------------re_insert--------------------------------------
// Re-insert a yanked Node.
void PhaseIFG::re_insert( uint a ) {
  assert( _is_square, "only on square" );
  assert( _yanked->test(a), "" );
  (*_yanked) >>= a;

  IndexSetIterator elements(&_adjs[a]);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].insert(a);
    lrgs(datum).invalid_degree();
  }
}

//------------------------------compute_degree---------------------------------
// Compute the degree between 2 live ranges. If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size. If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
// MULTIPLY the sizes. Inspect Briggs' thesis on register pairs to see why
// this is so.
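// For example, two aligned register pairs contribute MAX2(2,2) = 2 to each
// other's degree, while a fat-proj covering 8 registers against a pair
// contributes 8*2 = 16.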
int LRG::compute_degree( LRG &l ) const {
  int tmp;
  int num_regs = _num_regs;
  int nregs = l.num_regs();
  tmp = (_fat_proj || l._fat_proj) // either is a fat-proj?
    ? (num_regs * nregs)           // then use product
    : MAX2(num_regs,nregs);        // else use max
  return tmp;
}

//------------------------------effective_degree-------------------------------
// Compute effective degree for this live range. If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size. If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
// MULTIPLY the sizes. Inspect Briggs' thesis on register pairs to see why
// this is so.
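// This is the sum of compute_degree() against each current neighbor.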
int PhaseIFG::effective_degree( uint lidx ) const {
  int eff = 0;
  int num_regs = lrgs(lidx).num_regs();
  int fat_proj = lrgs(lidx)._fat_proj;
  IndexSet *s = neighbors(lidx);
  IndexSetIterator elements(s);
  uint nidx;
  while((nidx = elements.next()) != 0) {
    LRG &lrgn = lrgs(nidx);
    int nregs = lrgn.num_regs();
    eff += (fat_proj || lrgn._fat_proj) // either is a fat-proj?
      ? (num_regs * nregs)              // then use product
      : MAX2(num_regs,nregs);           // else use max
  }
  return eff;
}


#ifndef PRODUCT
//------------------------------dump-------------------------------------------
void PhaseIFG::dump() const {
  tty->print_cr("-- Interference Graph --%s--",
                _is_square ? "square" : "triangular" );
  if( _is_square ) {
    for( uint i = 0; i < _maxlrg; i++ ) {
      tty->print( (*_yanked)[i] ? "XX " : " ");
      tty->print("L%d: { ",i);
      IndexSetIterator elements(&_adjs[i]);
      uint datum;
      while ((datum = elements.next()) != 0) {
        tty->print("L%d ", datum);
      }
      tty->print_cr("}");

    }
    return;
  }

  // Triangular
  for( uint i = 0; i < _maxlrg; i++ ) {
    uint j;
    tty->print( (*_yanked)[i] ? "XX " : " ");
    tty->print("L%d: { ",i);
    for( j = _maxlrg; j > i; j-- )
      if( test_edge(j - 1,i) ) {
        tty->print("L%d ",j - 1);
      }
    tty->print("| ");
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      tty->print("L%d ", datum);
    }
    tty->print("}\n");
  }
  tty->print("\n");
}

//------------------------------stats------------------------------------------
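// Print a histogram of neighbor counts over all live ranges.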
void PhaseIFG::stats() const {
  ResourceMark rm;
  int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2);
  memset( h_cnt, 0, sizeof(int)*_maxlrg*2 );
  uint i;
  for( i = 0; i < _maxlrg; i++ ) {
    h_cnt[neighbor_cnt(i)]++;
  }
  tty->print_cr("--Histogram of counts--");
  for( i = 0; i < _maxlrg*2; i++ )
    if( h_cnt[i] )
      tty->print("%d/%d ",i,h_cnt[i]);
  tty->print_cr("");
}

//------------------------------verify-----------------------------------------
void PhaseIFG::verify( const PhaseChaitin *pc ) const {
  // IFG is square, sorted and no need for Find
  for( uint i = 0; i < _maxlrg; i++ ) {
    assert(!((*_yanked)[i]) || !neighbor_cnt(i), "Is removed completely" );
    IndexSet *set = &_adjs[i];
    IndexSetIterator elements(set);
    uint idx;
    uint last = 0;
    while ((idx = elements.next()) != 0) {
      assert( idx != i, "Must have empty diagonal");
      assert( pc->Find_const(idx) == idx, "Must not need Find" );
      assert( _adjs[idx].member(i), "IFG not square" );
      assert( !(*_yanked)[idx], "No yanked neighbors" );
      assert( last < idx, "not sorted increasing");
      last = idx;
    }
    assert( !lrgs(i)._degree_valid ||
            effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" );
  }
}
#endif

//------------------------------interfere_with_live----------------------------
// Interfere this register with everything currently live. Use the RegMasks
// to trim the set of possible interferences. Return a count of register-only
// interferences as an estimate of register pressure.
void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
  uint retval = 0;
  // Interfere with everything live.
  const RegMask &rm = lrgs(r).mask();
  // Check for interference by checking overlap of regmasks.
  // Only interfere if acceptable register masks overlap.
  IndexSetIterator elements(liveout);
  uint l;
  while( (l = elements.next()) != 0 )
    if( rm.overlap( lrgs(l).mask() ) )
      _ifg->add_edge( r, l );
}

//------------------------------build_ifg_virtual------------------------------
// Actually build the interference graph. Uses virtual registers only, no
// physical register masks. This allows me to be very aggressive when
// coalescing copies. Some of this aggressiveness will have to be undone
// later, but I'd rather get all the copies I can now (since unremoved copies
// at this point can end up in bad places). For copies I re-insert later, I
// have more opportunity to place them in low-frequency locations.
void PhaseChaitin::build_ifg_virtual( ) {

  // For all blocks (in any order) do...
  for( uint i=0; i<_cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    IndexSet *liveout = _live->live(b);

    // The IFG is built by a single reverse pass over each basic block.
    // Starting with the known live-out set, we remove things that get
    // defined and add things that become live (essentially executing one
    // pass of a standard LIVE analysis). Just before a Node defines a value
    // (and removes it from the live-ness set) that value is certainly live.
    // The defined value interferes with everything currently live. The
    // value is then removed from the live-ness set and its inputs are
    // added to the live-ness set.
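    // In short, scanning backward: for each Node n, remove def(n) from the
    // live set, interfere def(n) with the live set, then add n's inputs.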
    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
      Node *n = b->_nodes[j-1];

      // Get value being defined
      uint r = n2lidx(n);

      // Some special values do not allocate
      if( r ) {

        // Remove from live-out set
        liveout->remove(r);

        // Copies do not define a new value and so do not interfere.
        // Remove the copy's source from the liveout set before interfering.
        uint idx = n->is_Copy();
        if( idx ) liveout->remove( n2lidx(n->in(idx)) );

        // Interfere with everything live
        interfere_with_live( r, liveout );
      }

      // Make all inputs live
      if( !n->is_Phi() ) {      // Phi function uses come from prior block
        for( uint k = 1; k < n->req(); k++ )
          liveout->insert( n2lidx(n->in(k)) );
      }

      // 2-address instructions always have the defined value live
      // on entry to the instruction, even though it is being defined
      // by the instruction. We pretend a virtual copy sits just prior
      // to the instruction and kills the src-def'd register.
      // In other words, for 2-address instructions the defined value
      // interferes with all inputs.
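      // (For example, an x86-style "add dst, src" both reads and writes dst.)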
      uint idx;
      if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
        const MachNode *mach = n->as_Mach();
        // Sometimes my 2-address ADDs are commuted in a bad way.
        // We generally want the USE-DEF register to refer to the
        // loop-varying quantity, to avoid a copy.
        uint op = mach->ideal_Opcode();
        // Check that mach->num_opnds() == 3 to ensure instruction is
        // not subsuming constants, effectively excludes addI_cin_imm
        // Can NOT swap for instructions like addI_cin_imm since it
        // is adding zero to yhi + carry and the second ideal-input
        // points to the result of adding low-halves.
        // Checking req() and num_opnds() does NOT distinguish addI_cout from addI_cout_imm
        if( (op == Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
            n->in(1)->bottom_type()->base() == Type::Int &&
            // See if the ADD is involved in a tight data loop the wrong way
            n->in(2)->is_Phi() &&
            n->in(2)->in(2) == n ) {
          Node *tmp = n->in(1);
          n->set_req( 1, n->in(2) );
          n->set_req( 2, tmp );
        }
        // Defined value interferes with all inputs
        uint lidx = n2lidx(n->in(idx));
        for( uint k = 1; k < n->req(); k++ ) {
          uint kidx = n2lidx(n->in(k));
          if( kidx != lidx )
            _ifg->add_edge( r, kidx );
        }
      }
    } // End of forall instructions in block
  }   // End of forall blocks
}

//------------------------------count_int_pressure-----------------------------
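// Sum the reg_pressure() of the UP (register-resident), non-float live ranges
// in 'liveout' that can use an integer register.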
uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
  IndexSetIterator elements(liveout);
  uint lidx;
  uint cnt = 0;
  while ((lidx = elements.next()) != 0) {
    if( lrgs(lidx).mask().is_UP() &&
        lrgs(lidx).mask_size() &&
        !lrgs(lidx)._is_float &&
        lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) )
      cnt += lrgs(lidx).reg_pressure();
  }
  return cnt;
}

//------------------------------count_float_pressure---------------------------
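// Sum the reg_pressure() of the UP float live ranges in 'liveout'.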
uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
  IndexSetIterator elements(liveout);
  uint lidx;
  uint cnt = 0;
  while ((lidx = elements.next()) != 0) {
    if( lrgs(lidx).mask().is_UP() &&
        lrgs(lidx).mask_size() &&
        lrgs(lidx)._is_float )
      cnt += lrgs(lidx).reg_pressure();
  }
  return cnt;
}

//------------------------------lower_pressure---------------------------------
// Adjust register pressure down by the live range's contribution. Capture
// the last hi-to-low transition.
static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
  if( lrg->mask().is_UP() && lrg->mask_size() ) {
    if( lrg->_is_float ) {
      pressure[1] -= lrg->reg_pressure();
      if( pressure[1] == (uint)FLOATPRESSURE ) {
        hrp_index[1] = where;
#ifdef EXACT_PRESSURE
        if( pressure[1] > b->_freg_pressure )
          b->_freg_pressure = pressure[1]+1;
#else
        b->_freg_pressure = (uint)FLOATPRESSURE+1;
#endif
      }
    } else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
      pressure[0] -= lrg->reg_pressure();
      if( pressure[0] == (uint)INTPRESSURE ) {
        hrp_index[0] = where;
#ifdef EXACT_PRESSURE
        if( pressure[0] > b->_reg_pressure )
          b->_reg_pressure = pressure[0]+1;
#else
        b->_reg_pressure = (uint)INTPRESSURE+1;
#endif
      }
    }
  }
}

//------------------------------build_ifg_physical-----------------------------
// Build the interference graph using physical registers when available.
// That is, if 2 live ranges are simultaneously alive but their acceptable
// register sets do not overlap, then they do not interfere.
uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
  NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); )

  uint spill_reg = LRG::SPILL_REG;
  uint must_spill = 0;

  // For all blocks (in any order) do...
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    // Clone (rather than smash in place) the liveout info, so it is alive
    // for the "collect_gc_info" phase later.
    IndexSet liveout(_live->live(b));
    uint last_inst = b->end_idx();
    // Compute last phi index
    uint last_phi;
    for( last_phi = 1; last_phi < last_inst; last_phi++ )
      if( !b->_nodes[last_phi]->is_Phi() )
        break;

    // Reset block's register pressure values for each ifg construction
    uint pressure[2], hrp_index[2];
    pressure[0] = pressure[1] = 0;
    hrp_index[0] = hrp_index[1] = last_inst+1;
    b->_reg_pressure = b->_freg_pressure = 0;
    // Liveout things are presumed live for the whole block. We accumulate
    // 'area' accordingly. If they get killed in the block, we'll subtract
    // the unused part of the block from the area.
    double cost = b->_freq * double(last_inst-last_phi);
    assert( cost >= 0, "negative spill cost" );
    IndexSetIterator elements(&liveout);
    uint lidx;
    while ((lidx = elements.next()) != 0) {
      LRG &lrg = lrgs(lidx);
      lrg._area += cost;
      // Compute initial register pressure
      if( lrg.mask().is_UP() && lrg.mask_size() ) {
        if( lrg._is_float ) {   // Count float pressure
          pressure[1] += lrg.reg_pressure();
#ifdef EXACT_PRESSURE
          if( pressure[1] > b->_freg_pressure )
            b->_freg_pressure = pressure[1];
#endif
        // Count int pressure, but do not count the SP, flags
        } else if( lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
          pressure[0] += lrg.reg_pressure();
#ifdef EXACT_PRESSURE
          if( pressure[0] > b->_reg_pressure )
            b->_reg_pressure = pressure[0];
#endif
        }
      }
    }
    assert( pressure[0] == count_int_pressure  (&liveout), "" );
    assert( pressure[1] == count_float_pressure(&liveout), "" );

    // The IFG is built by a single reverse pass over each basic block.
    // Starting with the known live-out set, we remove things that get
    // defined and add things that become live (essentially executing one
    // pass of a standard LIVE analysis). Just before a Node defines a value
    // (and removes it from the live-ness set) that value is certainly live.
    // The defined value interferes with everything currently live. The
    // value is then removed from the live-ness set and its inputs are added
    // to the live-ness set.
    uint j;
    for( j = last_inst + 1; j > 1; j-- ) {
      Node *n = b->_nodes[j - 1];

      // Get value being defined
      uint r = n2lidx(n);

      // Some special values do not allocate
      if( r ) {
        // A DEF normally costs block frequency; rematerialized values are
        // removed from the DEF site, so LOWER costs here.
        lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq;

        // If it is not live, then this instruction is dead. Probably caused
        // by spilling and rematerialization. Who cares why, yank this baby.
        if( !liveout.member(r) && n->Opcode() != Op_SafePoint ) {
          Node *def = n->in(0);
          if( !n->is_Proj() ||
              // Could also be a flags-projection of a dead ADD or such.
              (n2lidx(def) && !liveout.member(n2lidx(def)) ) ) {
            b->_nodes.remove(j - 1);
            if( lrgs(r)._def == n ) lrgs(r)._def = 0;
            n->disconnect_inputs(NULL);
            _cfg._bbs.map(n->_idx,NULL);
            n->replace_by(C->top());
            // Since yanking a Node from block, high pressure moves up one
            hrp_index[0]--;
            hrp_index[1]--;
            continue;
          }

          // Fat-projections kill many registers which cannot be used to
          // hold live ranges.
          if( lrgs(r)._fat_proj ) {
            // Count the int-only registers
            RegMask itmp = lrgs(r).mask();
            itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
            int iregs = itmp.Size();
#ifdef EXACT_PRESSURE
            if( pressure[0]+iregs > b->_reg_pressure )
              b->_reg_pressure = pressure[0]+iregs;
#endif
            if( pressure[0] <= (uint)INTPRESSURE &&
                pressure[0]+iregs > (uint)INTPRESSURE ) {
#ifndef EXACT_PRESSURE
              b->_reg_pressure = (uint)INTPRESSURE+1;
#endif
              hrp_index[0] = j-1;
            }
            // Count the float-only registers
            RegMask ftmp = lrgs(r).mask();
            ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
            int fregs = ftmp.Size();
#ifdef EXACT_PRESSURE
            if( pressure[1]+fregs > b->_freg_pressure )
              b->_freg_pressure = pressure[1]+fregs;
#endif
            if( pressure[1] <= (uint)FLOATPRESSURE &&
                pressure[1]+fregs > (uint)FLOATPRESSURE ) {
#ifndef EXACT_PRESSURE
              b->_freg_pressure = (uint)FLOATPRESSURE+1;
#endif
              hrp_index[1] = j-1;
            }
          }

        } else {                // Else it is live
          // A DEF also ends 'area' partway through the block.
          lrgs(r)._area -= cost;
          assert( lrgs(r)._area >= 0, "negative spill area" );

          // Ensure a high score for immediate-use spill copies so they get a color
          if( n->is_SpillCopy()
              && lrgs(r)._def != NodeSentinel // MultiDef live range can still split
              && n->outcnt() == 1             // and use must be in this block
              && _cfg._bbs[n->unique_out()->_idx] == b ) {
            // All single-use MachSpillCopy(s) that immediately precede their
            // use must color early. If a longer live range steals their
            // color, the spill copy will split and may push another spill copy
            // further away resulting in an infinite spill-split-retry cycle.
            // Assigning a zero area results in a high score() and a good
            // location in the simplify list.
            //

            Node *single_use = n->unique_out();
            assert( b->find_node(single_use) >= j, "Use must be later in block");
            // Use can be earlier in block if it is a Phi, but then I should be a MultiDef

            // Find first non-SpillCopy 'm' that follows the current instruction
            // (j - 1) is index for current instruction 'n'
            Node *m = n;
            for( uint i = j; i <= last_inst && m->is_SpillCopy(); ++i ) { m = b->_nodes[i]; }
            if( m == single_use ) {
              lrgs(r)._area = 0.0;
            }
          }

          // Remove from live-out set
          if( liveout.remove(r) ) {
            // Adjust register pressure.
            // Capture last hi-to-lo pressure transition
            lower_pressure( &lrgs(r), j-1, b, pressure, hrp_index );
            assert( pressure[0] == count_int_pressure  (&liveout), "" );
            assert( pressure[1] == count_float_pressure(&liveout), "" );
          }

          // Copies do not define a new value and so do not interfere.
          // Remove the copy's source from the liveout set before interfering.
          uint idx = n->is_Copy();
          if( idx ) {
            uint x = n2lidx(n->in(idx));
            if( liveout.remove( x ) ) {
              lrgs(x)._area -= cost;
              // Adjust register pressure.
              lower_pressure( &lrgs(x), j-1, b, pressure, hrp_index );
              assert( pressure[0] == count_int_pressure  (&liveout), "" );
              assert( pressure[1] == count_float_pressure(&liveout), "" );
            }
          }
        } // End of if live or not

        // Interfere with everything live. If the defined value must
        // go in a particular register, just remove that register from
        // all conflicting parties and avoid the interference.

        // Make exclusions for rematerializable defs. Since rematerializable
        // DEFs are not bound but the live range is, some uses must be bound.
        // If we spill live range 'r', it can rematerialize at each use site
        // according to its bindings.
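        // That is, rather than adding edges to a bound live range, strip its
        // bound register(s) out of every conflicting neighbor's mask below.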
        const RegMask &rmask = lrgs(r).mask();
        if( lrgs(r).is_bound() && !(n->rematerialize()) && rmask.is_NotEmpty() ) {
          // Smear odd bits; leave only aligned pairs of bits.
          RegMask r2mask = rmask;
          r2mask.SmearToPairs();
          // Check for common case
          int r_size = lrgs(r).num_regs();
          OptoReg::Name r_reg = (r_size == 1) ? rmask.find_first_elem() : OptoReg::Physical;

          IndexSetIterator elements(&liveout);
          uint l;
          while ((l = elements.next()) != 0) {
            LRG &lrg = lrgs(l);
            // If 'l' must spill already, do not further hack his bits.
            // He'll get some interferences and be forced to spill later.
            if( lrg._must_spill ) continue;
            // Remove bound register(s) from 'l's choices
            RegMask old = lrg.mask();
            uint old_size = lrg.mask_size();
            // Remove the bits from LRG 'r' from LRG 'l' so 'l' no
            // longer interferes with 'r'. If 'l' requires aligned
            // adjacent pairs, subtract out bit pairs.
            if( lrg.num_regs() == 2 && !lrg._fat_proj ) {
              lrg.SUBTRACT( r2mask );
              lrg.compute_set_mask_size();
            } else if( r_size != 1 ) {
              lrg.SUBTRACT( rmask );
              lrg.compute_set_mask_size();
            } else {            // Common case: size 1 bound removal
              if( lrg.mask().Member(r_reg) ) {
                lrg.Remove(r_reg);
                lrg.set_mask_size(lrg.mask().is_AllStack() ? 65535:old_size-1);
              }
            }
            // If 'l' goes completely dry, it must spill.
            if( lrg.not_free() ) {
              // Give 'l' some kind of reasonable mask, so he picks up
              // interferences (and will spill later).
              lrg.set_mask( old );
              lrg.set_mask_size(old_size);
              must_spill++;
              lrg._must_spill = 1;
              lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
            }
          }
        } // End of if bound

        // Now interfere with everything that is live and has
        // compatible register sets.
        interfere_with_live(r,&liveout);

      } // End of if normal register-allocated value

      cost -= b->_freq;            // Area remaining in the block
      if( cost < 0.0 ) cost = 0.0; // Cost goes negative in the Phi area

      // Make all inputs live
      if( !n->is_Phi() ) {      // Phi function uses come from prior block
        JVMState* jvms = n->jvms();
        uint debug_start = jvms ? jvms->debug_start() : 999999;
        // Start loop at 1 (skip control edge) for most Nodes.
        // SCMemProj's might be the sole use of a StoreLConditional.
        // While StoreLConditionals set memory (the SCMemProj use)
        // they also def flags; if that flag def is unused the
        // allocator sees a flag-setting instruction with no use of
        // the flags and assumes it's dead. This keeps the (useless)
        // flag-setting behavior alive while also keeping the (useful)
        // memory update effect.
        for( uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++ ) {
          Node *def = n->in(k);
          uint x = n2lidx(def);
          if( !x ) continue;
          LRG &lrg = lrgs(x);
          // No use-side cost for spilling debug info
          if( k < debug_start )
            // A USE costs twice block frequency (once for the Load, once
            // for a Load-delay). Rematerialized uses only cost once.
            lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq));
          // It is live now
          if( liveout.insert( x ) ) {
            // Newly live things assumed live from here to top of block
            lrg._area += cost;
            // Adjust register pressure
            if( lrg.mask().is_UP() && lrg.mask_size() ) {
              if( lrg._is_float ) {
                pressure[1] += lrg.reg_pressure();
#ifdef EXACT_PRESSURE
                if( pressure[1] > b->_freg_pressure )
                  b->_freg_pressure = pressure[1];
#endif
              } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
                pressure[0] += lrg.reg_pressure();
#ifdef EXACT_PRESSURE
                if( pressure[0] > b->_reg_pressure )
                  b->_reg_pressure = pressure[0];
#endif
              }
            }
            assert( pressure[0] == count_int_pressure  (&liveout), "" );
            assert( pressure[1] == count_float_pressure(&liveout), "" );
          }
          assert( lrg._area >= 0, "negative spill area" );
        }
      }
    } // End of reverse pass over all instructions in block

    // If we run off the top of the block with high pressure and
    // never see a hi-to-low pressure transition, just record that
    // the whole block is high pressure.
    if( pressure[0] > (uint)INTPRESSURE ) {
      hrp_index[0] = 0;
#ifdef EXACT_PRESSURE
      if( pressure[0] > b->_reg_pressure )
        b->_reg_pressure = pressure[0];
#else
      b->_reg_pressure = (uint)INTPRESSURE+1;
#endif
    }
    if( pressure[1] > (uint)FLOATPRESSURE ) {
      hrp_index[1] = 0;
#ifdef EXACT_PRESSURE
      if( pressure[1] > b->_freg_pressure )
        b->_freg_pressure = pressure[1];
#else
      b->_freg_pressure = (uint)FLOATPRESSURE+1;
#endif
    }

    // Compute high pressure index; avoid landing in the middle of projnodes
    j = hrp_index[0];
    if( j < b->_nodes.size() && j < b->end_idx()+1 ) {
      Node *cur = b->_nodes[j];
      while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) {
        j--;
        cur = b->_nodes[j];
      }
    }
    b->_ihrp_index = j;
    j = hrp_index[1];
    if( j < b->_nodes.size() && j < b->end_idx()+1 ) {
      Node *cur = b->_nodes[j];
      while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) {
        j--;
        cur = b->_nodes[j];
      }
    }
    b->_fhrp_index = j;

#ifndef PRODUCT
    // Gather Register Pressure Statistics
    if( PrintOptoStatistics ) {
      if( b->_reg_pressure > (uint)INTPRESSURE || b->_freg_pressure > (uint)FLOATPRESSURE )
        _high_pressure++;
      else
        _low_pressure++;
    }
#endif
  } // End of for all blocks

  return must_spill;
}