/*
 * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_output.cpp.incl"

extern uint size_java_to_interp();
extern uint reloc_java_to_interp();
extern uint size_exception_handler();
extern uint size_deopt_handler();

#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif

extern int emit_exception_handler(CodeBuffer &cbuf);
extern int emit_deopt_handler(CodeBuffer &cbuf);

//------------------------------Output-----------------------------------------
// Convert Nodes to instruction bits and pass off to the VM
void Compile::Output() {
  // RootNode goes away at this point; its block must hold no instructions.
  assert( _cfg->_broot->_nodes.size() == 0, "" );

  // Initialize the space for the BufferBlob used to find and verify
  // instruction size in MachNode::emit_size()
  init_scratch_buffer_blob();

  // Make sure I can find the Start Node
  Block_Array& bbs = _cfg->_bbs;
  Block *entry = _cfg->_blocks[1];
  Block *broot = _cfg->_broot;

  const StartNode *start = entry->_nodes[0]->as_Start();

  // Replace StartNode with prolog
  MachPrologNode *prolog = new (this) MachPrologNode();
  entry->_nodes.map( 0, prolog );
  bbs.map( prolog->_idx, entry );
  bbs.map( start->_idx, NULL ); // start is no longer in any block

  // Virtual methods need an unverified entry point

  if( is_osr_compilation() ) {
    if( PoisonOSREntry ) {
      // TODO: Should use a ShouldNotReachHereNode...
      _cfg->insert( broot, 0, new (this) MachBreakpointNode() );
    }
  } else {
    if( _method && !_method->flags().is_static() ) {
      // Insert unvalidated entry point
      _cfg->insert( broot, 0, new (this) MachUEPNode() );
    }
  }

  // Break before main entry point
  if( (_method && _method->break_at_execute())
#ifndef PRODUCT
    ||(OptoBreakpoint && is_method_compilation())
    ||(OptoBreakpointOSR && is_osr_compilation())
    ||(OptoBreakpointC2R && !_method)
#endif
    ) {
    // checking for _method means that OptoBreakpoint does not apply to
    // runtime stubs or frame converters
    _cfg->insert( entry, 1, new (this) MachBreakpointNode() );
  }

  // Insert epilogs before every return
  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
    Block *b = _cfg->_blocks[i];
    if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point?
      Node *m = b->end();
      if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
        MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
        b->add_inst( epilog );
        bbs.map(epilog->_idx, b);
        //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
      }
    }
  }

# ifdef ENABLE_ZAP_DEAD_LOCALS
  if ( ZapDeadCompiledLocals )  Insert_zap_nodes();
# endif

  ScheduleAndBundle();

#ifndef PRODUCT
  if (trace_opto_output()) {
    tty->print("\n---- After ScheduleAndBundle ----\n");
    for (uint i = 0; i < _cfg->_num_blocks; i++) {
      tty->print("\nBB#%03d:\n", i);
      Block *bb = _cfg->_blocks[i];
      for (uint j = 0; j < bb->_nodes.size(); j++) {
        Node *n = bb->_nodes[j];
        OptoReg::Name reg = _regalloc->get_reg_first(n);
        tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
        n->dump();
      }
    }
  }
#endif

  if (failing())  return;

  BuildOopMaps();

  if (failing())  return;

  Fill_buffer();
}
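
// In short: Output() first rewrites the CFG in place -- a prolog in place of
// the StartNode, an optional unverified/OSR entry and breakpoints, and an
// epilog before every return -- and only then runs scheduling
// (ScheduleAndBundle), oop map construction (BuildOopMaps) and code emission
// (Fill_buffer). Everything Fill_buffer emits must already exist as a node
// in some block, which is why the prolog/epilog are spliced into the CFG
// here rather than emitted ad hoc during Fill_buffer.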

bool Compile::need_stack_bang(int frame_size_in_bytes) const {
  // Determine if we need to generate a stack overflow check.
  // Do it if the method is not a stub function and
  // has java calls or has frame size > vm_page_size/8.
  return (stub_function() == NULL &&
          (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
}
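
// Worked example (illustrative numbers): with a 4096-byte os::vm_page_size()
// the threshold above is 4096>>3 == 512, so a method with no Java calls and
// a frame of at most 512 bytes skips the explicit bang -- a frame that small,
// relative to the guard pages, cannot advance the stack pointer far enough
// to jump past them.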

bool Compile::need_register_stack_bang() const {
  // Determine if we need to generate a register stack overflow check.
  // This is only used on architectures which have split register
  // and memory stacks (ie. IA64).
  // Bang if the method is not a stub function and has java calls
  return (stub_function() == NULL && has_java_calls());
}

# ifdef ENABLE_ZAP_DEAD_LOCALS


// In order to catch compiler oop-map bugs, we have implemented
// a debugging mode called ZapDeadCompiledLocals.
// This mode causes the compiler to insert a call to a runtime routine,
// "zap_dead_locals", right before each place in compiled code
// that could potentially be a gc-point (i.e., a safepoint or oop map point).
// The runtime routine checks that locations mapped as oops are really
// oops, that locations mapped as values do not look like oops,
// and that locations mapped as dead are not used later
// (by zapping them to an invalid address).

int Compile::_CompiledZap_count = 0;

void Compile::Insert_zap_nodes() {
  bool skip = false;


  // Dink with static counts because code without the extra
  // runtime calls is MUCH faster for debugging purposes

  if ( CompileZapFirst == 0 ) ; // nothing special
  else if ( CompileZapFirst > CompiledZap_count() )  skip = true;
  else if ( CompileZapFirst == CompiledZap_count() )
    warning("starting zap compilation after skipping");

  if ( CompileZapLast == -1 ) ; // nothing special
  else if ( CompileZapLast < CompiledZap_count() )  skip = true;
  else if ( CompileZapLast == CompiledZap_count() )
    warning("about to compile last zap");

  ++_CompiledZap_count; // counts skipped zaps, too

  if ( skip )  return;


  if ( _method == NULL )
    return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care

  // Insert call to zap runtime stub before every node with an oop map
  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
    Block *b = _cfg->_blocks[i];
    for ( uint j = 0;  j < b->_nodes.size();  ++j ) {
      Node *n = b->_nodes[j];

      // Determining if we should insert a zap-a-lot node in output.
      // We do that for all nodes that have oopmap info, except for calls
      // to allocation.  Calls to allocation pass in the old top-of-eden pointer
      // and expect the C code to reset it.  Hence, there can be no safepoints between
      // the inlined-allocation and the call to new_Java, etc.
      // We also cannot zap monitor calls, as they must hold the microlock
      // during the call to Zap, which also wants to grab the microlock.
      bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
      if ( insert ) { // it is MachSafePoint
        if ( !n->is_MachCall() ) {
          insert = false;
        } else if ( n->is_MachCall() ) {
          MachCallNode* call = n->as_MachCall();
          if (call->entry_point() == OptoRuntime::new_instance_Java() ||
              call->entry_point() == OptoRuntime::new_array_Java() ||
              call->entry_point() == OptoRuntime::multianewarray2_Java() ||
              call->entry_point() == OptoRuntime::multianewarray3_Java() ||
              call->entry_point() == OptoRuntime::multianewarray4_Java() ||
              call->entry_point() == OptoRuntime::multianewarray5_Java() ||
              call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
              call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
              ) {
            insert = false;
          }
        }
        if (insert) {
          Node *zap = call_zap_node(n->as_MachSafePoint(), i);
          b->_nodes.insert( j, zap );
          _cfg->_bbs.map( zap->_idx, b );
          ++j;
        }
      }
    }
  }
}


Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
  const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
  CallStaticJavaNode* ideal_node =
    new (this, tf->domain()->cnt()) CallStaticJavaNode( tf,
         OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
                            "call zap dead locals stub", 0, TypePtr::BOTTOM);
  // We need to copy the OopMap from the site we're zapping at.
  // We have to make a copy, because the zap site might not be
  // a call site, and zap_dead is a call site.
  OopMap* clone = node_to_check->oop_map()->deep_copy();

  // Add the cloned OopMap to the zap node
  ideal_node->set_oop_map(clone);
  return _matcher->match_sfpt(ideal_node);
}

//------------------------------is_node_getting_a_safepoint--------------------
bool Compile::is_node_getting_a_safepoint( Node* n) {
  // This code duplicates the logic prior to the call of add_safepoint
  // below in this file.
  if( n->is_MachSafePoint() ) return true;
  return false;
}

# endif // ENABLE_ZAP_DEAD_LOCALS

//------------------------------compute_loop_first_inst_sizes------------------
// Compute the size of first NumberOfLoopInstrToAlign instructions at head
// of a loop. When aligning a loop we need to provide enough instructions
// in cpu's fetch buffer to feed decoders. The loop alignment could be
// avoided if we have enough instructions in fetch buffer at the head of a loop.
// By default, the size is set to 999999 by Block's constructor so that
// a loop will be aligned if the size is not reset here.
//
// Note: Mach instructions could contain several HW instructions
// so the size is estimated only.
//
void Compile::compute_loop_first_inst_sizes() {
  // The next condition is used to gate the loop alignment optimization.
  // Don't align a loop if there are enough instructions at the head of a loop
  // or alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
  // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
  // equal to 11 bytes which is the largest address NOP instruction.
  if( MaxLoopPad < OptoLoopAlignment-1 ) {
    uint last_block = _cfg->_num_blocks-1;
    for( uint i=1; i <= last_block; i++ ) {
      Block *b = _cfg->_blocks[i];
      // Check the first loop's block which requires an alignment.
      if( b->head()->is_Loop() &&
          b->code_alignment() > (uint)relocInfo::addr_unit() ) {
        uint sum_size = 0;
        uint inst_cnt = NumberOfLoopInstrToAlign;
        inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt,
                                              _regalloc);
        // Check the next fallthrough block if first loop's block does not have
        // enough instructions.
        if( inst_cnt > 0 && i < last_block ) {
          // First, check if the first loop's block contains whole loop.
          // LoopNode::LoopBackControl == 2.
          Block *bx = _cfg->_bbs[b->pred(2)->_idx];
          // Skip connector blocks (with limit in case of irreducible loops).
          int search_limit = 16;
          while( bx->is_connector() && search_limit-- > 0) {
            bx = _cfg->_bbs[bx->pred(1)->_idx];
          }
          if( bx != b ) { // loop body is in several blocks.
            Block *nb = NULL;
            while( inst_cnt > 0 && i < last_block && nb != bx &&
                   !_cfg->_blocks[i+1]->head()->is_Loop() ) {
              i++;
              nb = _cfg->_blocks[i];
              inst_cnt = nb->compute_first_inst_size(sum_size, inst_cnt,
                                                     _regalloc);
            } // while( inst_cnt > 0 && i < last_block )
          } // if( bx != b )
        } // if( inst_cnt > 0 && i < last_block )
        b->set_first_inst_size(sum_size);
      } // if( b->head()->is_Loop() )
    } // for( i <= last_block )
  } // if( MaxLoopPad < OptoLoopAlignment-1 )
}
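
// Illustrative numbers for the gate above: with OptoLoopAlignment == 16 and
// MaxLoopPad == 11 (the Intel setting described in the comment), 11 < 15
// holds, so the loop head sizes are computed. The recorded sum_size is
// consulted later when alignment padding is laid down: a loop whose head
// already carries enough instruction bytes to fill the fetch buffer can be
// left unaligned, while a short head may get up to MaxLoopPad bytes of NOPs.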

//----------------------Shorten_branches---------------------------------------
// The architecture description provides short branch variants for some long
// branch instructions. Replace eligible long branches with short branches.
void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size) {

  // fill in the nop array for bundling computations
  MachNode *_nop_list[Bundle::_nop_count];
  Bundle::initialize_nops(_nop_list, this);

  // ------------------
  // Compute size of each block, method size, and relocation information size
  uint *jmp_end = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks);
  uint *blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1);
  DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks); )
  blk_starts[0] = 0;

  // Initialize the sizes to 0
  code_size  = 0;          // Size in bytes of generated code
  stub_size  = 0;          // Size in bytes of all stub entries
  // Size in bytes of all relocation entries, including those in local stubs.
  // Start with 2-bytes of reloc info for the unvalidated entry point
  reloc_size = 1;          // Number of relocation entries
  const_size = 0;          // size of fp constants in words

  // Make three passes.  The first computes pessimistic blk_starts,
  // relative jmp_end, reloc_size and const_size information.
  // The second performs short branch substitution using the pessimistic
  // sizing.  The third inserts nops where needed.

  Node *nj; // tmp

  // Step one, perform a pessimistic sizing pass.
  uint i;
  uint min_offset_from_last_call = 1;  // init to a positive value
  uint nop_size = (new (this) MachNopNode())->size(_regalloc);
  for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
    Block *b = _cfg->_blocks[i];

    // Sum all instruction sizes to compute block size
    uint last_inst = b->_nodes.size();
    uint blk_size = 0;
    for( uint j = 0; j<last_inst; j++ ) {
      nj = b->_nodes[j];
      uint inst_size = nj->size(_regalloc);
      blk_size += inst_size;
      // Handle machine instruction nodes
      if( nj->is_Mach() ) {
        MachNode *mach = nj->as_Mach();
        blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
        reloc_size += mach->reloc();
        const_size += mach->const_size();
        if( mach->is_MachCall() ) {
          MachCallNode *mcall = mach->as_MachCall();
          // This destination address is NOT PC-relative

          mcall->method_set((intptr_t)mcall->entry_point());

          if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) {
            stub_size  += size_java_to_interp();
            reloc_size += reloc_java_to_interp();
          }
        } else if (mach->is_MachSafePoint()) {
          // If call/safepoint are adjacent, account for possible
          // nop to disambiguate the two safepoints.
          if (min_offset_from_last_call == 0) {
            blk_size += nop_size;
          }
        }
      }
      min_offset_from_last_call += inst_size;
      // Remember end of call offset
      if (nj->is_MachCall() && nj->as_MachCall()->is_safepoint_node()) {
        min_offset_from_last_call = 0;
      }
    }

    // During short branch replacement, we store the relative (to blk_starts)
    // end of jump in jmp_end, rather than the absolute end of jump.  This
    // is so that we do not need to recompute sizes of all nodes when we compute
    // correct blk_starts in our next sizing pass.
    jmp_end[i] = blk_size;
    DEBUG_ONLY( jmp_target[i] = 0; )

    // When the next block starts a loop, we may insert pad NOP
    // instructions.  Since we cannot know our future alignment,
    // assume the worst.
    if( i<_cfg->_num_blocks-1 ) {
      Block *nb = _cfg->_blocks[i+1];
      int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
      if( max_loop_pad > 0 ) {
        assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
        blk_size += max_loop_pad;
      }
    }

    // Save block size; update total method size
    blk_starts[i+1] = blk_starts[i]+blk_size;
  }

  // Step two, replace eligible long jumps.

  // Note: this will only get the long branches within short branch
  //   range. Another pass might detect more branches that became
  //   candidates because the shortening in the first pass exposed
  //   more opportunities. Unfortunately, this would require
  //   recomputing the starting and ending positions for the blocks
  for( i=0; i<_cfg->_num_blocks; i++ ) {
    Block *b = _cfg->_blocks[i];

    int j;
    // Find the branch; ignore trailing NOPs.
    for( j = b->_nodes.size()-1; j>=0; j-- ) {
      nj = b->_nodes[j];
      if( !nj->is_Mach() || nj->as_Mach()->ideal_Opcode() != Op_Con )
        break;
    }

    if (j >= 0) {
      if( nj->is_Mach() && nj->as_Mach()->may_be_short_branch() ) {
        MachNode *mach = nj->as_Mach();
        // This requires the TRUE branch target be in succs[0]
        uint bnum = b->non_connector_successor(0)->_pre_order;
        uintptr_t target = blk_starts[bnum];
        if( mach->is_pc_relative() ) {
          int offset = target-(blk_starts[i] + jmp_end[i]);
          if (_matcher->is_short_branch_offset(offset)) {
            // We've got a winner.  Replace this branch.
            MachNode *replacement = mach->short_branch_version(this);
            b->_nodes.map(j, replacement);

            // Update the jmp_end size to save time in our
            // next pass.
            jmp_end[i] -= (mach->size(_regalloc) - replacement->size(_regalloc));
            DEBUG_ONLY( jmp_target[i] = bnum; );
          }
        } else {
#ifndef PRODUCT
          mach->dump(3);
#endif
          Unimplemented();
        }
      }
    }
  }

  // Compute the size of first NumberOfLoopInstrToAlign instructions at head
  // of a loop. It is used to determine the padding for loop alignment.
  compute_loop_first_inst_sizes();

  // Step 3, compute the offsets of all the labels
  uint last_call_adr = max_uint;
  for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
    // copy the offset of the beginning to the corresponding label
    assert(labels[i].is_unused(), "cannot patch at this point");
    labels[i].bind_loc(blk_starts[i], CodeBuffer::SECT_INSTS);

    // insert padding for any instructions that need it
    Block *b = _cfg->_blocks[i];
    uint last_inst = b->_nodes.size();
    uint adr = blk_starts[i];
    for( uint j = 0; j<last_inst; j++ ) {
      nj = b->_nodes[j];
      if( nj->is_Mach() ) {
        int padding = nj->as_Mach()->compute_padding(adr);
        // If call/safepoint are adjacent insert a nop (5010568)
        if (padding == 0 && nj->is_MachSafePoint() && !nj->is_MachCall() &&
            adr == last_call_adr ) {
          padding = nop_size;
        }
        if(padding > 0) {
          assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
          int nops_cnt = padding / nop_size;
          MachNode *nop = new (this) MachNopNode(nops_cnt);
          b->_nodes.insert(j++, nop);
          _cfg->_bbs.map( nop->_idx, b );
          adr += padding;
          last_inst++;
        }
      }
      adr += nj->size(_regalloc);

      // Remember end of call offset
      if (nj->is_MachCall() && nj->as_MachCall()->is_safepoint_node()) {
        last_call_adr = adr;
      }
    }

    if ( i != _cfg->_num_blocks-1) {
      // Get the size of the block
      uint blk_size = adr - blk_starts[i];

      // When the next block starts a loop, we may insert pad NOP
      // instructions.
      Block *nb = _cfg->_blocks[i+1];
      int current_offset = blk_starts[i] + blk_size;
      current_offset += nb->alignment_padding(current_offset);
      // Save block size; update total method size
      blk_starts[i+1] = current_offset;
    }
  }

#ifdef ASSERT
  for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
    if( jmp_target[i] != 0 ) {
      int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_end[i]);
      if (!_matcher->is_short_branch_offset(offset)) {
        tty->print_cr("target (%d) - jmp_end(%d) = offset (%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_end[i], offset, i, jmp_target[i]);
      }
      assert(_matcher->is_short_branch_offset(offset), "Displacement too large for short jmp");
    }
  }
#endif

  // ------------------
  // Compute size for code buffer
  code_size   = blk_starts[i-1] + jmp_end[i-1];

  // Relocation records
  reloc_size += 1;              // Relo entry for exception handler

  // Adjust reloc_size to the number of records of relocation info
  // Min is 2 bytes, max is probably 6 or 8, with a tax up to 25% for
  // a relocation index.
  // The CodeBuffer will expand the locs array if this estimate is too low.
  reloc_size   *= 10 / sizeof(relocInfo);

  // Adjust const_size to number of bytes
  const_size   *= 2*jintSize; // both float and double take two words per entry

}
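
// How the step-two test works, on illustrative numbers: suppose block i
// starts at blk_starts[i] == 100 with jmp_end[i] == 20, and the target block
// starts at blk_starts[bnum] == 90. Then offset == 90 - 120 == -30, and the
// branch is shortened iff _matcher->is_short_branch_offset(-30) says the
// platform's short form reaches that far. Because pass one sized every block
// pessimistically, real offsets can only shrink afterwards, so a branch
// accepted here stays in range without iterating to a fixed point.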

//------------------------------FillLocArray-----------------------------------
// Create a bit of debug info and append it to the array.  The mapping is from
// Java local or expression stack to constant, register or stack-slot.  For
// doubles, insert 2 mappings and return 1 (to tell the caller that the next
// entry has been taken care of and caller should skip it).
static LocationValue *new_loc_value( PhaseRegAlloc *ra, OptoReg::Name regnum, Location::Type l_type ) {
  // This should never have accepted Bad before
  assert(OptoReg::is_valid(regnum), "location must be valid");
  return (OptoReg::is_reg(regnum))
    ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
    : new LocationValue(Location::new_stk_loc(l_type,  ra->reg2offset(regnum)));
}
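
// Usage sketch: a value allocated to a machine register becomes a register
// Location, e.g.
//   new_loc_value(_regalloc, regnum, Location::normal)
// while a spilled value becomes a stack Location at the frame offset
// ra->reg2offset(regnum). Callers below (FillLocArray and the monitor loop
// in Process_OopMap_Node) rely on this to tell the deoptimizer where each
// Java-visible value lives.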

void Compile::FillLocArray( int idx, Node *local, GrowableArray<ScopeValue*> *array ) {
  assert( local, "use _top instead of null" );
  if (array->length() != idx) {
    assert(array->length() == idx + 1, "Unexpected array count");
    // Old functionality:
    //   return
    // New functionality:
    //   Assert if the local is not top. In product mode let the new node
    //   override the old entry.
    assert(local == top(), "LocArray collision");
    if (local == top()) {
      return;
    }
    array->pop();
  }
  const Type *t = local->bottom_type();

  // Grab the register number for the local
  OptoReg::Name regnum = _regalloc->get_reg_first(local);
  if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
    // Record the double as two float registers.
    // The register mask for such a value always specifies two adjacent
    // float registers, with the lower register number even.
    // Normally, the allocation of high and low words to these registers
    // is irrelevant, because nearly all operations on register pairs
    // (e.g., StoreD) treat them as a single unit.
    // Here, we assume in addition that the words in these two registers are
    // stored "naturally" (by operations like StoreD and double stores
    // within the interpreter) such that the lower-numbered register
    // is written to the lower memory address.  This may seem like
    // a machine dependency, but it is not--it is a requirement on
    // the author of the <arch>.ad file to ensure that, for every
    // even/odd double-register pair to which a double may be allocated,
    // the word in the even single-register is stored to the first
    // memory word.  (Note that register numbers are completely
    // arbitrary, and are not tied to any machine-level encodings.)
#ifdef _LP64
    if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon ) {
      array->append(new ConstantIntValue(0));
      array->append(new_loc_value( _regalloc, regnum, Location::dbl ));
    } else if ( t->base() == Type::Long ) {
      array->append(new ConstantIntValue(0));
      array->append(new_loc_value( _regalloc, regnum, Location::lng ));
    } else if ( t->base() == Type::RawPtr ) {
      // jsr/ret return address which must be restored into the full
      // width 64-bit stack slot.
      array->append(new_loc_value( _regalloc, regnum, Location::lng ));
    }
#else //_LP64
#ifdef SPARC
    if (t->base() == Type::Long && OptoReg::is_reg(regnum)) {
      // For SPARC we have to swap high and low words for
      // long values stored in a single-register (g0-g7).
      array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
      array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
    } else
#endif //SPARC
    if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) {
      // Repack the double/long as two jints.
      // The convention the interpreter uses is that the second local
      // holds the first raw word of the native double representation.
      // This is actually reasonable, since locals and stack arrays
      // grow downwards in all implementations.
      // (If, on some machine, the interpreter's Java locals or stack
      // were to grow upwards, the embedded doubles would be word-swapped.)
      array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
      array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
    }
#endif //_LP64
    else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
             OptoReg::is_reg(regnum) ) {
      array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double
                                   ? Location::float_in_dbl : Location::normal ));
    } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
      array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long
                                   ? Location::int_in_long : Location::normal ));
    } else {
      array->append(new_loc_value( _regalloc, regnum, _regalloc->is_oop(local) ? Location::oop : Location::normal ));
    }
    return;
  }

  // No register. It must be constant data.
  switch (t->base()) {
  case Type::Half:              // Second half of a double
    ShouldNotReachHere();       // Caller should skip 2nd halves
    break;
  case Type::AnyPtr:
    array->append(new ConstantOopWriteValue(NULL));
    break;
  case Type::AryPtr:
  case Type::InstPtr:
  case Type::KlassPtr:          // fall through
    array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->encoding()));
    break;
  case Type::Int:
    array->append(new ConstantIntValue(t->is_int()->get_con()));
    break;
  case Type::RawPtr:
    // A return address (T_ADDRESS).
    assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
#ifdef _LP64
    // Must be restored to the full-width 64-bit stack slot.
    array->append(new ConstantLongValue(t->is_ptr()->get_con()));
#else
    array->append(new ConstantIntValue(t->is_ptr()->get_con()));
#endif
    break;
  case Type::FloatCon: {
    float f = t->is_float_constant()->getf();
    array->append(new ConstantIntValue(jint_cast(f)));
    break;
  }
  case Type::DoubleCon: {
    jdouble d = t->is_double_constant()->getd();
#ifdef _LP64
    array->append(new ConstantIntValue(0));
    array->append(new ConstantDoubleValue(d));
#else
    // Repack the double as two jints.
    // The convention the interpreter uses is that the second local
    // holds the first raw word of the native double representation.
    // This is actually reasonable, since locals and stack arrays
    // grow downwards in all implementations.
    // (If, on some machine, the interpreter's Java locals or stack
    // were to grow upwards, the embedded doubles would be word-swapped.)
    jint *dp = (jint*)&d;
    array->append(new ConstantIntValue(dp[1]));
    array->append(new ConstantIntValue(dp[0]));
#endif
    break;
  }
  case Type::Long: {
    jlong d = t->is_long()->get_con();
#ifdef _LP64
    array->append(new ConstantIntValue(0));
    array->append(new ConstantLongValue(d));
#else
    // Repack the long as two jints.
    // The convention the interpreter uses is that the second local
    // holds the first raw word of the native long representation.
    // This is actually reasonable, since locals and stack arrays
    // grow downwards in all implementations.
    // (If, on some machine, the interpreter's Java locals or stack
    // were to grow upwards, the embedded longs would be word-swapped.)
    jint *dp = (jint*)&d;
    array->append(new ConstantIntValue(dp[1]));
    array->append(new ConstantIntValue(dp[0]));
#endif
    break;
  }
  case Type::Top:               // Add an illegal value here
    array->append(new LocationValue(Location()));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
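
// Worked example for the 32-bit repacking above: the jdouble 1.0 has raw
// bits 0x3FF00000_00000000. It is recorded as two ConstantIntValues, dp[1]
// first and dp[0] second, so the *second* debug-info entry holds the first
// raw word of the native representation -- exactly the interpreter
// convention the comments describe. (Which jint contains 0x3FF00000
// depends on the platform's endianness.)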

// Determine if this node starts a bundle
bool Compile::starts_bundle(const Node *n) const {
  return (_node_bundling_limit > n->_idx &&
          _node_bundling_base[n->_idx].starts_bundle());
}

//--------------------------Process_OopMap_Node--------------------------------
void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {

  // Handle special safepoint nodes for synchronization
  MachSafePointNode *sfn   = mach->as_MachSafePoint();
  MachCallNode      *mcall;

#ifdef ENABLE_ZAP_DEAD_LOCALS
  assert( is_node_getting_a_safepoint(mach),  "logic does not match; false negative");
#endif

  int safepoint_pc_offset = current_offset;

  // Add the safepoint in the DebugInfoRecorder
  if( !mach->is_MachCall() ) {
    mcall = NULL;
    debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
  } else {
    mcall = mach->as_MachCall();
    safepoint_pc_offset += mcall->ret_addr_offset();
    debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
  }

  // Loop over the JVMState list to add scope information
  // Do not skip safepoints with a NULL method, they need monitor info
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();

  // Visit scopes from oldest to youngest.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int idx;
    ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
    // Safepoints that do not have method() set only provide oop-map and monitor info
    // to support GC; these do not support deoptimization.
    int num_locs = (method == NULL) ? 0 : jvms->loc_size();
    int num_exps = (method == NULL) ? 0 : jvms->stk_size();
    int num_mon  = jvms->nof_monitors();
    assert(method == NULL || jvms->bci() < 0 || num_locs == method->max_locals(),
           "JVMS local count must match that of the method");

    // Add Local and Expression Stack Information

    // Insert locals into the locarray
    GrowableArray<ScopeValue*> *locarray = new GrowableArray<ScopeValue*>(num_locs);
    for( idx = 0; idx < num_locs; idx++ ) {
      FillLocArray( idx, sfn->local(jvms, idx), locarray );
    }

    // Insert expression stack entries into the exparray
    GrowableArray<ScopeValue*> *exparray = new GrowableArray<ScopeValue*>(num_exps);
    for( idx = 0; idx < num_exps; idx++ ) {
      FillLocArray( idx, sfn->stack(jvms, idx), exparray );
    }

    // Add in mappings of the monitors
    assert( !method ||
            !method->is_synchronized() ||
            method->is_native() ||
            num_mon > 0 ||
            !GenerateSynchronizationCode,
            "monitors must always exist for synchronized methods");

    // Build the growable array of MonitorValues for the monitors
    GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);

    // Loop over monitors and insert into array
    for(idx = 0; idx < num_mon; idx++) {
      // Grab the node that defines this monitor
      Node* box_node;
      Node* obj_node;
      box_node = sfn->monitor_box(jvms, idx);
      obj_node = sfn->monitor_obj(jvms, idx);

      // Create ScopeValue for object
      ScopeValue *scval = NULL;
      if( !obj_node->is_Con() ) {
        OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
        scval = new_loc_value( _regalloc, obj_reg, Location::oop );
      } else {
        scval = new ConstantOopWriteValue(obj_node->bottom_type()->is_instptr()->const_oop()->encoding());
      }

      OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
      monarray->append(new MonitorValue(scval, Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg))));
    }

    // Build first class objects to pass to scope
    DebugToken *locvals = debug_info()->create_scope_values(locarray);
    DebugToken *expvals = debug_info()->create_scope_values(exparray);
    DebugToken *monvals = debug_info()->create_monitor_values(monarray);

    // Make method available for all Safepoints
    ciMethod* scope_method = method ? method : _method;
    // Describe the scope here
    assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
    debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),locvals,expvals,monvals);
  } // End jvms loop

  // Mark the end of the scope set.
  debug_info()->end_safepoint(safepoint_pc_offset);
}
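
// Example of the scope walk above: for a call chain a() -> b() -> c() fully
// inlined, youngest_jvms belongs to c() and depth runs 1..3, so
// describe_scope() is called for a(), then b(), then c(). Each safepoint
// therefore records one scope per inlining level, callers before callees,
// and end_safepoint() closes the set at the recorded pc.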


// A simplified version of Process_OopMap_Node, to handle non-safepoints.
class NonSafepointEmitter {
  Compile*  C;
  JVMState* _pending_jvms;
  int       _pending_offset;

  void emit_non_safepoint();

 public:
  NonSafepointEmitter(Compile* compile) {
    this->C = compile;
    _pending_jvms = NULL;
    _pending_offset = 0;
  }

  void observe_instruction(Node* n, int pc_offset) {
    if (!C->debug_info()->recording_non_safepoints())  return;

    Node_Notes* nn = C->node_notes_at(n->_idx);
    if (nn == NULL || nn->jvms() == NULL)  return;
    if (_pending_jvms != NULL &&
        _pending_jvms->same_calls_as(nn->jvms())) {
      // Repeated JVMS?  Stretch it up here.
      _pending_offset = pc_offset;
    } else {
      if (_pending_jvms != NULL &&
          _pending_offset < pc_offset) {
        emit_non_safepoint();
      }
      _pending_jvms = NULL;
      if (pc_offset > C->debug_info()->last_pc_offset()) {
        // This is the only way _pending_jvms can become non-NULL:
        _pending_jvms = nn->jvms();
        _pending_offset = pc_offset;
      }
    }
  }

  // Stay out of the way of real safepoints:
  void observe_safepoint(JVMState* jvms, int pc_offset) {
    if (_pending_jvms != NULL &&
        !_pending_jvms->same_calls_as(jvms) &&
        _pending_offset < pc_offset) {
      emit_non_safepoint();
    }
    _pending_jvms = NULL;
  }

  void flush_at_end() {
    if (_pending_jvms != NULL) {
      emit_non_safepoint();
    }
    _pending_jvms = NULL;
  }
};
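
// Typical use, as in Fill_buffer() below: a single emitter instance sees
// every emitted instruction and every real safepoint --
//   NonSafepointEmitter non_safepoints(this);
//   non_safepoints.observe_instruction(n, current_offset);    // each node
//   non_safepoints.observe_safepoint(jvms, current_offset);   // each safepoint
//   non_safepoints.flush_at_end();
// Consecutive instructions sharing a JVMState are coalesced into one
// non-safepoint record at the last covered pc, so the debug information
// stays compact while still mapping (nearly) every pc back to a bci.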

void NonSafepointEmitter::emit_non_safepoint() {
  JVMState* youngest_jvms = _pending_jvms;
  int       pc_offset     = _pending_offset;

  // Clear it now:
  _pending_jvms = NULL;

  DebugInformationRecorder* debug_info = C->debug_info();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);
  int max_depth = youngest_jvms->depth();

  // Visit scopes from oldest to youngest.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
    debug_info->describe_scope(pc_offset, method, jvms->bci());
  }

  // Mark the end of the scope set.
  debug_info->end_non_safepoint(pc_offset);
}



// helper for Fill_buffer bailout logic
static void turn_off_compiler(Compile* C) {
  if (CodeCache::unallocated_capacity() >= CodeCacheMinimumFreeSpace*10) {
    // Do not turn off compilation if a single giant method has
    // blown the code cache size.
    C->record_failure("excessive request to CodeCache");
  } else {
    // Let CompileBroker disable further compilations.
    C->record_failure("CodeCache is full");
  }
}
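
// The threshold above reads: if the code cache still has at least ten times
// CodeCacheMinimumFreeSpace unallocated, the cache as a whole is healthy and
// only this one request was oversized, so only this compile fails.
// Otherwise the cache really is close to full, and the failure message is
// the one that makes the broker stop issuing further compilations.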
|
|
928 |
|
|
929 |
|
|
930 |
//------------------------------Fill_buffer------------------------------------
|
|
931 |
void Compile::Fill_buffer() {
|
|
932 |
|
|
933 |
// Set the initially allocated size
|
|
934 |
int code_req = initial_code_capacity;
|
|
935 |
int locs_req = initial_locs_capacity;
|
|
936 |
int stub_req = TraceJumps ? initial_stub_capacity * 10 : initial_stub_capacity;
|
|
937 |
int const_req = initial_const_capacity;
|
|
938 |
bool labels_not_set = true;
|
|
939 |
|
|
940 |
int pad_req = NativeCall::instruction_size;
|
|
941 |
// The extra spacing after the code is necessary on some platforms.
|
|
942 |
// Sometimes we need to patch in a jump after the last instruction,
|
|
943 |
// if the nmethod has been deoptimized. (See 4932387, 4894843.)
|
|
944 |
|
|
945 |
uint i;
|
|
946 |
// Compute the byte offset where we can store the deopt pc.
|
|
947 |
if (fixed_slots() != 0) {
|
|
948 |
_orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
|
|
949 |
}
|
|
950 |
|
|
951 |
// Compute prolog code size
|
|
952 |
_method_size = 0;
|
|
953 |
_frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
|
|
954 |
#ifdef IA64
|
|
955 |
if (save_argument_registers()) {
|
|
956 |
// 4815101: this is a stub with implicit and unknown precision fp args.
|
|
957 |
// The usual spill mechanism can only generate stfd's in this case, which
|
|
958 |
// doesn't work if the fp reg to spill contains a single-precision denorm.
|
|
959 |
// Instead, we hack around the normal spill mechanism using stfspill's and
|
|
960 |
// ldffill's in the MachProlog and MachEpilog emit methods. We allocate
|
|
961 |
// space here for the fp arg regs (f8-f15) we're going to thusly spill.
|
|
962 |
//
|
|
963 |
// If we ever implement 16-byte 'registers' == stack slots, we can
|
|
964 |
// get rid of this hack and have SpillCopy generate stfspill/ldffill
|
|
965 |
// instead of stfd/stfs/ldfd/ldfs.
|
|
966 |
_frame_slots += 8*(16/BytesPerInt);
|
|
967 |
}
|
|
968 |
#endif
|
|
969 |
assert( _frame_slots >= 0 && _frame_slots < 1000000, "sanity check" );
|
|
970 |
|
|
971 |
// Create an array of unused labels, one for each basic block
|
|
972 |
Label *blk_labels = NEW_RESOURCE_ARRAY(Label, _cfg->_num_blocks+1);
|
|
973 |
|
|
974 |
for( i=0; i <= _cfg->_num_blocks; i++ ) {
|
|
975 |
blk_labels[i].init();
|
|
976 |
}
|
|
977 |
|
|
978 |
// If this machine supports different size branch offsets, then pre-compute
|
|
979 |
// the length of the blocks
|
|
980 |
if( _matcher->is_short_branch_offset(0) ) {
|
|
981 |
Shorten_branches(blk_labels, code_req, locs_req, stub_req, const_req);
|
|
982 |
labels_not_set = false;
|
|
983 |
}
|
|
984 |
|
|
985 |
// nmethod and CodeBuffer count stubs & constants as part of method's code.
|
|
986 |
int exception_handler_req = size_exception_handler();
|
|
987 |
int deopt_handler_req = size_deopt_handler();
|
|
988 |
exception_handler_req += MAX_stubs_size; // add marginal slop for handler
|
|
989 |
deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
|
|
990 |
stub_req += MAX_stubs_size; // ensure per-stub margin
|
|
991 |
code_req += MAX_inst_size; // ensure per-instruction margin
|
|
992 |
if (StressCodeBuffers)
|
|
993 |
code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
|
|
994 |
int total_req = code_req + pad_req + stub_req + exception_handler_req + deopt_handler_req + const_req;
|
|
995 |
CodeBuffer* cb = code_buffer();
|
|
996 |
cb->initialize(total_req, locs_req);
|
|
997 |
|
|
998 |
// Have we run out of code space?
|
|
999 |
if (cb->blob() == NULL) {
|
|
1000 |
turn_off_compiler(this);
|
|
1001 |
return;
|
|
1002 |
}
|
|
1003 |
// Configure the code buffer.
|
|
1004 |
cb->initialize_consts_size(const_req);
|
|
1005 |
cb->initialize_stubs_size(stub_req);
|
|
1006 |
cb->initialize_oop_recorder(env()->oop_recorder());
|
|
1007 |
|
|
1008 |
// fill in the nop array for bundling computations
|
|
1009 |
MachNode *_nop_list[Bundle::_nop_count];
|
|
1010 |
Bundle::initialize_nops(_nop_list, this);
|
|
1011 |
|
|
1012 |
// Create oopmap set.
|
|
1013 |
_oop_map_set = new OopMapSet();
|
|
1014 |
|
|
1015 |
// !!!!! This preserves old handling of oopmaps for now
|
|
1016 |
debug_info()->set_oopmaps(_oop_map_set);
|
|
1017 |
|
|
1018 |
// Count and start of implicit null check instructions
|
|
1019 |
uint inct_cnt = 0;
|
|
1020 |
uint *inct_starts = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);
|
|
1021 |
|
|
1022 |
// Count and start of calls
|
|
1023 |
uint *call_returns = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);
|
|
1024 |
|
|
1025 |
uint return_offset = 0;
|
|
1026 |
MachNode *nop = new (this) MachNopNode();
|
|
1027 |
|
|
1028 |
int previous_offset = 0;
|
|
1029 |
int current_offset = 0;
|
|
1030 |
int last_call_offset = -1;
|
|
1031 |
|
|
1032 |
// Create an array of unused labels, one for each basic block, if printing is enabled
|
|
1033 |
#ifndef PRODUCT
|
|
1034 |
int *node_offsets = NULL;
|
|
1035 |
uint node_offset_limit = unique();
|
|
1036 |
|
|
1037 |
|
|
1038 |
if ( print_assembly() )
|
|
1039 |
node_offsets = NEW_RESOURCE_ARRAY(int, node_offset_limit);
|
|
1040 |
#endif
|
|
1041 |
|
|
1042 |
NonSafepointEmitter non_safepoints(this); // emit non-safepoints lazily
|
|
1043 |
|
|
1044 |
// ------------------
|
|
1045 |
// Now fill in the code buffer
|
|
1046 |
Node *delay_slot = NULL;
|
|
1047 |
|
|
1048 |
for( i=0; i < _cfg->_num_blocks; i++ ) {
|
|
1049 |
Block *b = _cfg->_blocks[i];
|
|
1050 |
|
|
1051 |
Node *head = b->head();
|
|
1052 |
|
|
1053 |
// If this block needs to start aligned (i.e, can be reached other
|
|
1054 |
// than by falling-thru from the previous block), then force the
|
|
1055 |
// start of a new bundle.
|
|
1056 |
if( Pipeline::requires_bundling() && starts_bundle(head) )
|
|
1057 |
cb->flush_bundle(true);
|
|
1058 |
|
|
1059 |
// Define the label at the beginning of the basic block
|
|
1060 |
if( labels_not_set )
|
|
1061 |
MacroAssembler(cb).bind( blk_labels[b->_pre_order] );
|
|
1062 |
|
|
1063 |
else
|
|
1064 |
assert( blk_labels[b->_pre_order].loc_pos() == cb->code_size(),
|
|
1065 |
"label position does not match code offset" );
|
|
1066 |
|
|
1067 |
uint last_inst = b->_nodes.size();
|
|
1068 |
|
|
1069 |
// Emit block normally, except for last instruction.
|
|
1070 |
// Emit means "dump code bits into code buffer".
|
|
1071 |
for( uint j = 0; j<last_inst; j++ ) {
|
|
1072 |
|
|
1073 |
// Get the node
|
|
1074 |
Node* n = b->_nodes[j];
|
|
1075 |
|
|
1076 |
// See if delay slots are supported
|
|
1077 |
if (valid_bundle_info(n) &&
|
|
1078 |
node_bundling(n)->used_in_unconditional_delay()) {
|
|
1079 |
assert(delay_slot == NULL, "no use of delay slot node");
|
|
1080 |
assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
|
|
1081 |
|
|
1082 |
delay_slot = n;
|
|
1083 |
continue;
|
|
1084 |
}
|
|
1085 |
|
|
1086 |
// If this starts a new instruction group, then flush the current one
|
|
1087 |
// (but allow split bundles)
|
|
1088 |
if( Pipeline::requires_bundling() && starts_bundle(n) )
|
|
1089 |
cb->flush_bundle(false);
|
|
1090 |
|
|
1091 |
// The following logic is duplicated in the code ifdeffed for
|
|
1092 |
// ENABLE_ZAP_DEAD_LOCALS which apppears above in this file. It
|
|
1093 |
// should be factored out. Or maybe dispersed to the nodes?
|
|
1094 |
|
|
1095 |
// Special handling for SafePoint/Call Nodes
|
|
1096 |
bool is_mcall = false;
|
|
1097 |
if( n->is_Mach() ) {
|
|
1098 |
MachNode *mach = n->as_Mach();
|
|
1099 |
is_mcall = n->is_MachCall();
|
|
1100 |
bool is_sfn = n->is_MachSafePoint();
|
|
1101 |
|
|
1102 |
// If this requires all previous instructions be flushed, then do so
|
|
1103 |
if( is_sfn || is_mcall || mach->alignment_required() != 1) {
|
|
1104 |
cb->flush_bundle(true);
|
|
1105 |
current_offset = cb->code_size();
|
|
1106 |
}
|
|
1107 |
|
|
1108 |
// align the instruction if necessary
|
|
1109 |
int nop_size = nop->size(_regalloc);
|
|
1110 |
int padding = mach->compute_padding(current_offset);
|
|
1111 |
// Make sure safepoint node for polling is distinct from a call's
|
|
1112 |
// return by adding a nop if needed.
|
|
1113 |
if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset ) {
|
|
1114 |
padding = nop_size;
|
|
1115 |
}
|
|
1116 |
assert( labels_not_set || padding == 0, "instruction should already be aligned")
|
|
1117 |
|
|
1118 |
if(padding > 0) {
|
|
1119 |
assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
|
|
1120 |
int nops_cnt = padding / nop_size;
|
|
1121 |
MachNode *nop = new (this) MachNopNode(nops_cnt);
|
|
1122 |
b->_nodes.insert(j++, nop);
|
|
1123 |
last_inst++;
|
|
1124 |
_cfg->_bbs.map( nop->_idx, b );
|
|
1125 |
nop->emit(*cb, _regalloc);
|
|
1126 |
cb->flush_bundle(true);
|
|
1127 |
current_offset = cb->code_size();
|
|
1128 |
}
|
|
1129 |
|
|
1130 |
// Remember the start of the last call in a basic block
|
|
1131 |
if (is_mcall) {
|
|
1132 |
MachCallNode *mcall = mach->as_MachCall();
|
|
1133 |
|
|
1134 |
// This destination address is NOT PC-relative
|
|
1135 |
mcall->method_set((intptr_t)mcall->entry_point());
|
|
1136 |
|
|
1137 |
// Save the return address
|
|
1138 |
call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
|
|
1139 |
|
|
1140 |
if (!mcall->is_safepoint_node()) {
|
|
1141 |
is_mcall = false;
|
|
1142 |
is_sfn = false;
|
|
1143 |
}
|
|
1144 |
}
|
|
1145 |
|
|
1146 |
// sfn will be valid whenever mcall is valid now because of inheritance
|
|
1147 |
if( is_sfn || is_mcall ) {
|
|
1148 |
|
|
1149 |
// Handle special safepoint nodes for synchronization
|
|
1150 |
if( !is_mcall ) {
|
|
1151 |
MachSafePointNode *sfn = mach->as_MachSafePoint();
|
|
1152 |
// !!!!! Stubs only need an oopmap right now, so bail out
|
|
1153 |
if( sfn->jvms()->method() == NULL) {
|
|
1154 |
// Write the oopmap directly to the code blob??!!
|
|
1155 |
# ifdef ENABLE_ZAP_DEAD_LOCALS
|
|
1156 |
assert( !is_node_getting_a_safepoint(sfn), "logic does not match; false positive");
|
|
1157 |
# endif
|
|
1158 |
continue;
|
|
1159 |
}
|
|
1160 |
} // End synchronization
|
|
1161 |
|
|
1162 |
non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
|
|
1163 |
current_offset);
|
|
1164 |
Process_OopMap_Node(mach, current_offset);
|
|
1165 |
} // End if safepoint
|
|
1166 |
|
|
1167 |
// If this is a null check, then add the start of the previous instruction to the list
|
|
1168 |
else if( mach->is_MachNullCheck() ) {
|
|
1169 |
inct_starts[inct_cnt++] = previous_offset;
|
|
1170 |
}
|
|
1171 |
|
|
1172 |
// If this is a branch, then fill in the label with the target BB's label
|
|
1173 |
else if ( mach->is_Branch() ) {
|
|
1174 |
|
|
1175 |
if ( mach->ideal_Opcode() == Op_Jump ) {
|
|
1176 |
for (uint h = 0; h < b->_num_succs; h++ ) {
|
|
1177 |
Block* succs_block = b->_succs[h];
|
|
1178 |
for (uint j = 1; j < succs_block->num_preds(); j++) {
|
|
1179 |
Node* jpn = succs_block->pred(j);
|
|
1180 |
if ( jpn->is_JumpProj() && jpn->in(0) == mach ) {
|
|
1181 |
uint block_num = succs_block->non_connector()->_pre_order;
|
|
1182 |
Label *blkLabel = &blk_labels[block_num];
|
|
1183 |
mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
|
|
1184 |
}
|
|
1185 |
}
|
|
1186 |
}
|
|
1187 |
} else {
|
|
1188 |
// For Branchs
|
|
1189 |
// This requires the TRUE branch target be in succs[0]
|
|
1190 |
uint block_num = b->non_connector_successor(0)->_pre_order;
|
|
1191 |
mach->label_set( blk_labels[block_num], block_num );
|
|
1192 |
}
|
|
1193 |
}
|
|
1194 |
|
|
1195 |
#ifdef ASSERT
|
|
1196 |
// Check that oop-store preceeds the card-mark
|
|
1197 |
else if( mach->ideal_Opcode() == Op_StoreCM ) {
|
|
1198 |
uint storeCM_idx = j;
|
|
1199 |
Node *oop_store = mach->in(mach->_cnt); // First precedence edge
|
|
1200 |
assert( oop_store != NULL, "storeCM expects a precedence edge");
|
|
1201 |
uint i4;
|
|
1202 |
for( i4 = 0; i4 < last_inst; ++i4 ) {
|
|
1203 |
if( b->_nodes[i4] == oop_store ) break;
|
|
1204 |
}
|
|
1205 |
// Note: This test can provide a false failure if other precedence
|
|
1206 |
// edges have been added to the storeCMNode.
|
|
1207 |
assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
|
|
1208 |
}
|
|
1209 |
#endif
|
|
1210 |
|
|
1211 |
else if( !n->is_Proj() ) {
|
|
1212 |
// Remember the begining of the previous instruction, in case
|
|
1213 |
// it's followed by a flag-kill and a null-check. Happens on
|
|
1214 |
// Intel all the time, with add-to-memory kind of opcodes.
|
|
1215 |
previous_offset = current_offset;
|
|
1216 |
}
|
|
1217 |
}
|
|
1218 |
|
|
1219 |
// Verify that there is sufficient space remaining
|
|
1220 |
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
|
|
1221 |
if (cb->blob() == NULL) {
|
|
1222 |
turn_off_compiler(this);
|
|
1223 |
return;
|
|
1224 |
}
|
|
1225 |
|
|
1226 |
// Save the offset for the listing
|
|
1227 |
#ifndef PRODUCT
|
|
1228 |
if( node_offsets && n->_idx < node_offset_limit )
|
|
1229 |
node_offsets[n->_idx] = cb->code_size();
|
|
1230 |
#endif
|
|
1231 |
|
|
1232 |
// "Normal" instruction case
|
|
1233 |
n->emit(*cb, _regalloc);
|
|
1234 |
current_offset = cb->code_size();
|
|
1235 |
non_safepoints.observe_instruction(n, current_offset);
|
|
1236 |
|
|
1237 |
// mcall is last "call" that can be a safepoint
|
|
1238 |
// record it so we can see if a poll will directly follow it
|
|
1239 |
// in which case we'll need a pad to make the PcDesc sites unique
|
|
1240 |
// see 5010568. This can be slightly inaccurate but conservative
|
|
1241 |
// in the case that return address is not actually at current_offset.
|
|
1242 |
// This is a small price to pay.
|
|
1243 |
|
|
1244 |
if (is_mcall) {
|
|
1245 |
last_call_offset = current_offset;
|
|
1246 |
}
|
|
1247 |
|
|
1248 |
// See if this instruction has a delay slot
|
|
1249 |
if ( valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
|
|
1250 |
assert(delay_slot != NULL, "expecting delay slot node");
|
|
1251 |
|
|
1252 |
// Back up 1 instruction
|
|
1253 |
cb->set_code_end(
|
|
1254 |
cb->code_end()-Pipeline::instr_unit_size());

        // Save the offset for the listing
#ifndef PRODUCT
        if( node_offsets && delay_slot->_idx < node_offset_limit )
          node_offsets[delay_slot->_idx] = cb->code_size();
#endif

        // Support a SafePoint in the delay slot
        if( delay_slot->is_MachSafePoint() ) {
          MachNode *mach = delay_slot->as_Mach();
          // !!!!! Stubs only need an oopmap right now, so bail out
          if( !mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL ) {
            // Write the oopmap directly to the code blob??!!
#           ifdef ENABLE_ZAP_DEAD_LOCALS
            assert( !is_node_getting_a_safepoint(mach), "logic does not match; false positive");
#           endif
            delay_slot = NULL;
            continue;
          }

          int adjusted_offset = current_offset - Pipeline::instr_unit_size();
          non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
                                           adjusted_offset);
          // Generate an OopMap entry
          Process_OopMap_Node(mach, adjusted_offset);
        }

        // Insert the delay slot instruction
        delay_slot->emit(*cb, _regalloc);

        // Don't reuse it
        delay_slot = NULL;
      }

    } // End for all instructions in block

    // If the next block _starts_ a loop, pad this block out to align
    // the loop start a little.  Helps prevent pipe stalls at loop starts.
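    // (Note: MachNopNode(padding / nop_size) assumes the padding returned
    // by alignment_padding() is an exact multiple of the NOP size; e.g.
    // 12 bytes of padding with 4-byte NOPs becomes one 3-NOP filler node.)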
    int nop_size = (new (this) MachNopNode())->size(_regalloc);
    if( i<_cfg->_num_blocks-1 ) {
      Block *nb = _cfg->_blocks[i+1];
      uint padding = nb->alignment_padding(current_offset);
      if( padding > 0 ) {
        MachNode *nop = new (this) MachNopNode(padding / nop_size);
        b->_nodes.insert( b->_nodes.size(), nop );
        _cfg->_bbs.map( nop->_idx, b );
        nop->emit(*cb, _regalloc);
        current_offset = cb->code_size();
      }
    }

  } // End of for all blocks

  non_safepoints.flush_at_end();

  // Offset too large?
  if (failing()) return;

  // Define a pseudo-label at the end of the code
  MacroAssembler(cb).bind( blk_labels[_cfg->_num_blocks] );

  // Compute the size of the first block
  _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();

  assert(cb->code_size() < 500000, "method is unreasonably large");

  // ------------------

#ifndef PRODUCT
  // Information on the size of the method, without the extraneous code
  Scheduling::increment_method_size(cb->code_size());
#endif

  // ------------------
  // Fill in exception table entries.
  FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);

  // Only java methods have exception handlers and deopt handlers
  if (_method) {
    // Emit the exception handler code.
    _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
    // Emit the deopt handler code.
    _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
  }

  // One last check for failed CodeBuffer::expand:
  if (cb->blob() == NULL) {
    turn_off_compiler(this);
    return;
  }

#ifndef PRODUCT
  // Dump the assembly code, including basic-block numbers
  if (print_assembly()) {
    ttyLocker ttyl;  // keep the following output all in one block
    if (!VMThread::should_terminate()) {  // test this under the tty lock
      // This output goes directly to the tty, not the compiler log.
      // To enable tools to match it up with the compilation activity,
      // be sure to tag this tty output with the compile ID.
      if (xtty != NULL) {
        xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
                   is_osr_compilation() ? " compile_kind='osr'" :
                   "");
      }
      if (method() != NULL) {
        method()->print_oop();
        print_codes();
      }
      dump_asm(node_offsets, node_offset_limit);
      if (xtty != NULL) {
        xtty->tail("opto_assembly");
      }
    }
  }
#endif

}

void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
  _inc_table.set_size(cnt);

  uint inct_cnt = 0;
  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
    Block *b = _cfg->_blocks[i];
    Node *n = NULL;
    int j;

    // Find the branch; ignore trailing NOPs.
    for( j = b->_nodes.size()-1; j>=0; j-- ) {
      n = b->_nodes[j];
      if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con )
        break;
    }

    // If we didn't find anything, continue
    if( j < 0 ) continue;

    // Compute ExceptionHandlerTable subtable entry and add it
    // (skip empty blocks)
    if( n->is_Catch() ) {

      // Get the offset of the return from the call
      uint call_return = call_returns[b->_pre_order];
#ifdef ASSERT
      assert( call_return > 0, "no call seen for this basic block" );
      while( b->_nodes[--j]->Opcode() == Op_MachProj ) ;
      assert( b->_nodes[j]->is_Call(), "CatchProj must follow call" );
#endif
      // last instruction is a CatchNode, find its CatchProjNodes
      int nof_succs = b->_num_succs;
      // allocate space
      GrowableArray<intptr_t> handler_bcis(nof_succs);
      GrowableArray<intptr_t> handler_pcos(nof_succs);
      // iterate through all successors
      for (int j = 0; j < nof_succs; j++) {
        Block* s = b->_succs[j];
        bool found_p = false;
        for( uint k = 1; k < s->num_preds(); k++ ) {
          Node *pk = s->pred(k);
          if( pk->is_CatchProj() && pk->in(0) == n ) {
            const CatchProjNode* p = pk->as_CatchProj();
            found_p = true;
            // add the corresponding handler bci & pco information
            if( p->_con != CatchProjNode::fall_through_index ) {
              // p leads to an exception handler (and is not fall through)
              assert(s == _cfg->_blocks[s->_pre_order],"bad numbering");
              // no duplicates, please
              if( !handler_bcis.contains(p->handler_bci()) ) {
                uint block_num = s->non_connector()->_pre_order;
                handler_bcis.append(p->handler_bci());
                handler_pcos.append(blk_labels[block_num].loc_pos());
              }
            }
          }
        }
        assert(found_p, "no matching predecessor found");
        // Note:  Due to empty block removal, one block may have
        // several CatchProj inputs, from the same Catch.
      }

      // Set the offset of the return from the call
      _handler_table.add_subtable(call_return, &handler_bcis, NULL, &handler_pcos);
      continue;
    }

    // Handle implicit null exception table updates
    if( n->is_MachNullCheck() ) {
      uint block_num = b->non_connector_successor(0)->_pre_order;
      _inc_table.append( inct_starts[inct_cnt++], blk_labels[block_num].loc_pos() );
      continue;
    }
  } // End of for all blocks fill in exception table entries
}
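
// (For orientation: _handler_table maps a call-return PC to its candidate
// handler bci/pco pairs, while _inc_table records, for each implicit null
// check, a start offset and the PC of its continuation block.)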

// Static Variables
#ifndef PRODUCT
uint Scheduling::_total_nop_size = 0;
uint Scheduling::_total_method_size = 0;
uint Scheduling::_total_branches = 0;
uint Scheduling::_total_unconditional_delays = 0;
uint Scheduling::_total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
#endif

// Initializer for class Scheduling

Scheduling::Scheduling(Arena *arena, Compile &compile)
  : _arena(arena),
    _cfg(compile.cfg()),
    _bbs(compile.cfg()->_bbs),
    _regalloc(compile.regalloc()),
    _reg_node(arena),
    _bundle_instr_count(0),
    _bundle_cycle_number(0),
    _scheduled(arena),
    _available(arena),
    _next_node(NULL),
    _bundle_use(0, 0, resource_count, &_bundle_use_elements[0]),
    _pinch_free_list(arena)
#ifndef PRODUCT
  , _branches(0)
  , _unconditional_delays(0)
#endif
{
  // Create a MachNopNode
  _nop = new (&compile) MachNopNode();

  // Now that the nops are in the array, save the count
  // (but allow entries for the nops)
  _node_bundling_limit = compile.unique();
  uint node_max = _regalloc->node_regs_max_index();

  compile.set_node_bundling_limit(_node_bundling_limit);

  // This one is persistent within the Compile class
  _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);

  // Allocate space for fixed-size arrays
  _node_latency    = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
  _uses            = NEW_ARENA_ARRAY(arena, short,          node_max);
  _current_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);

  // Clear the arrays
  memset(_node_bundling_base, 0, node_max * sizeof(Bundle));
  memset(_node_latency,       0, node_max * sizeof(unsigned short));
  memset(_uses,               0, node_max * sizeof(short));
  memset(_current_latency,    0, node_max * sizeof(unsigned short));

  // Clear the bundling information
  memcpy(_bundle_use_elements,
         Pipeline_Use::elaborated_elements,
         sizeof(Pipeline_Use::elaborated_elements));

  // Get the last node
  Block *bb = _cfg->_blocks[_cfg->_blocks.size()-1];

  _next_node = bb->_nodes[bb->_nodes.size()-1];
}

#ifndef PRODUCT
// Scheduling destructor
Scheduling::~Scheduling() {
  _total_branches             += _branches;
  _total_unconditional_delays += _unconditional_delays;
}
#endif

// Step ahead "i" cycles
void Scheduling::step(uint i) {

  Bundle *bundle = node_bundling(_next_node);
  bundle->set_starts_bundle();

  // Update the bundle record, but leave the flags information alone
  if (_bundle_instr_count > 0) {
    bundle->set_instr_count(_bundle_instr_count);
    bundle->set_resources_used(_bundle_use.resourcesUsed());
  }

  // Update the state information
  _bundle_instr_count = 0;
  _bundle_cycle_number += i;
  _bundle_use.step(i);
}
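
// (step(i) advances the machine model by i cycles and lets in-flight
// resource usage age out via _bundle_use.step(i); step_and_clear(), below,
// is used at block boundaries, where the pipeline state must be fully
// reset to the pristine per-cycle resource masks.)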
void Scheduling::step_and_clear() {
  Bundle *bundle = node_bundling(_next_node);
  bundle->set_starts_bundle();

  // Update the bundle record
  if (_bundle_instr_count > 0) {
    bundle->set_instr_count(_bundle_instr_count);
    bundle->set_resources_used(_bundle_use.resourcesUsed());

    _bundle_cycle_number += 1;
  }

  // Clear the bundling information
  _bundle_instr_count = 0;
  _bundle_use.reset();

  memcpy(_bundle_use_elements,
         Pipeline_Use::elaborated_elements,
         sizeof(Pipeline_Use::elaborated_elements));
}

//------------------------------ScheduleAndBundle------------------------------
// Perform instruction scheduling and bundling over the sequence of
// instructions in backwards order.
void Compile::ScheduleAndBundle() {

  // Don't optimize this if it isn't a method
  if (!_method)
    return;

  // Don't optimize this if scheduling is disabled
  if (!do_scheduling())
    return;

  NOT_PRODUCT( TracePhase t2("isched", &_t_instrSched, TimeCompiler); )

  // Create a data structure for all the scheduling information
  Scheduling scheduling(Thread::current()->resource_area(), *this);

  // Walk backwards over each basic block, computing the needed alignment
  scheduling.DoScheduling();
}

//------------------------------ComputeLocalLatenciesForward-------------------
// Compute the latency of all the instructions.  This is fairly simple,
// because we already have a legal ordering.  Walk over the instructions
// from first to last, and compute the latency of the instruction based
// on the latency of the preceding instruction(s).
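// (Worked example: if input def D has _node_latency 3 and the edge latency
// use->latency(k) is 2, the use cannot start before cycle 5; the loop below
// takes the max over all inputs, with a floor of 1.)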
void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    tty->print("# -> ComputeLocalLatenciesForward\n");
#endif

  // Walk over all the schedulable instructions
  for( uint j=_bb_start; j < _bb_end; j++ ) {

    // This is a kludge, forcing all latency calculations to start at 1.
    // Used to allow latency 0 to force an instruction to the beginning
    // of the bb
    uint latency = 1;
    Node *use = bb->_nodes[j];
    uint nlen = use->len();

    // Walk over all the inputs
    for ( uint k=0; k < nlen; k++ ) {
      Node *def = use->in(k);
      if (!def)
        continue;

      uint l = _node_latency[def->_idx] + use->latency(k);
      if (latency < l)
        latency = l;
    }

    _node_latency[use->_idx] = latency;

#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) {
      tty->print("# latency %4d: ", latency);
      use->dump();
    }
#endif
  }

#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    tty->print("# <- ComputeLocalLatenciesForward\n");
#endif

} // end ComputeLocalLatenciesForward

// See if this node fits into the present instruction bundle
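// (Three checks, in order: the node's earliest cycle must not lie in the
// future, the bundle must have a free issue slot, and the node's functional
// units must not collide with resources already claimed this cycle.)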
bool Scheduling::NodeFitsInBundle(Node *n) {
  uint n_idx = n->_idx;

  // If this is the unconditional delay instruction, then it fits
  if (n == _unconditional_delay_slot) {
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output())
      tty->print("# NodeFitsInBundle [%4d]: TRUE; is in unconditional delay slot\n", n->_idx);
#endif
    return (true);
  }

  // If the node cannot be scheduled this cycle, skip it
  if (_current_latency[n_idx] > _bundle_cycle_number) {
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output())
      tty->print("# NodeFitsInBundle [%4d]: FALSE; latency %4d > %d\n",
        n->_idx, _current_latency[n_idx], _bundle_cycle_number);
#endif
    return (false);
  }

  const Pipeline *node_pipeline = n->pipeline();

  uint instruction_count = node_pipeline->instructionCount();
  if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
    instruction_count = 0;
  else if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
    instruction_count++;

  if (_bundle_instr_count + instruction_count > Pipeline::_max_instrs_per_cycle) {
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output())
      tty->print("# NodeFitsInBundle [%4d]: FALSE; too many instructions: %d > %d\n",
        n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle);
#endif
    return (false);
  }

  // Don't allow non-machine nodes to be handled this way
  if (!n->is_Mach() && instruction_count == 0)
    return (false);

  // See if there is any overlap
  uint delay = _bundle_use.full_latency(0, node_pipeline->resourceUse());

  if (delay > 0) {
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output())
      tty->print("# NodeFitsInBundle [%4d]: FALSE; functional units overlap\n", n_idx);
#endif
    return false;
  }

#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    tty->print("# NodeFitsInBundle [%4d]: TRUE\n", n_idx);
#endif

  return true;
}

Node * Scheduling::ChooseNodeToBundle() {
  uint siz = _available.size();

  if (siz == 0) {

#ifndef PRODUCT
    if (_cfg->C->trace_opto_output())
      tty->print("# ChooseNodeToBundle: NULL\n");
#endif
    return (NULL);
  }

  // Fast path, if only 1 instruction in the bundle
  if (siz == 1) {
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) {
      tty->print("# ChooseNodeToBundle (only 1): ");
      _available[0]->dump();
    }
#endif
    return (_available[0]);
  }

  // Don't bother, if the bundle is already full
  if (_bundle_instr_count < Pipeline::_max_instrs_per_cycle) {
    for ( uint i = 0; i < siz; i++ ) {
      Node *n = _available[i];

      // Skip projections, we'll handle them another way
      if (n->is_Proj())
        continue;

      // This presupposes that instructions are inserted into the
      // available list in a legal order; i.e. instructions that
      // must be inserted first are at the head of the list
      if (NodeFitsInBundle(n)) {
#ifndef PRODUCT
        if (_cfg->C->trace_opto_output()) {
          tty->print("# ChooseNodeToBundle: ");
          n->dump();
        }
#endif
        return (n);
      }
    }
  }

  // Nothing fits in this bundle, choose the highest priority
#ifndef PRODUCT
  if (_cfg->C->trace_opto_output()) {
    tty->print("# ChooseNodeToBundle: ");
    _available[0]->dump();
  }
#endif

  return _available[0];
}

//------------------------------AddNodeToAvailableList-------------------------
void Scheduling::AddNodeToAvailableList(Node *n) {
  assert( !n->is_Proj(), "projections never directly made available" );
#ifndef PRODUCT
  if (_cfg->C->trace_opto_output()) {
    tty->print("# AddNodeToAvailableList: ");
    n->dump();
  }
#endif

  int latency = _current_latency[n->_idx];

  // Insert in latency order (insertion sort)
  uint i;
  for ( i=0; i < _available.size(); i++ )
    if (_current_latency[_available[i]->_idx] > latency)
      break;

  // Special Check for compares following branches
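  // (Heuristic: since scheduling runs backwards, pulling the compare to the
  // front of its latency class makes it likely to be picked right after the
  // branch that consumes it, keeping the condition codes live for as short
  // a span as possible in the final, re-reversed order.)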
  if( n->is_Mach() && _scheduled.size() > 0 ) {
    int op = n->as_Mach()->ideal_Opcode();
    Node *last = _scheduled[0];
    if( last->is_MachIf() && last->in(1) == n &&
        ( op == Op_CmpI ||
          op == Op_CmpU ||
          op == Op_CmpP ||
          op == Op_CmpF ||
          op == Op_CmpD ||
          op == Op_CmpL ) ) {

      // Recalculate position, moving to front of same latency
      for ( i=0 ; i < _available.size(); i++ )
        if (_current_latency[_available[i]->_idx] >= latency)
          break;
    }
  }

  // Insert the node in the available list
  _available.insert(i, n);

#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    dump_available();
#endif
}

//------------------------------DecrementUseCounts-----------------------------
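// (When node n is scheduled, each block-local input loses one pending use;
// an input whose count reaches zero has all of its in-block consumers
// scheduled and so becomes available itself.)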
void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
  for ( uint i=0; i < n->len(); i++ ) {
    Node *def = n->in(i);
    if (!def) continue;
    if( def->is_Proj() )        // If this is a machine projection, then
      def = def->in(0);         // propagate usage thru to the base instruction

    if( _bbs[def->_idx] != bb ) // Ignore if not block-local
      continue;

    // Compute the latency
    uint l = _bundle_cycle_number + n->latency(i);
    if (_current_latency[def->_idx] < l)
      _current_latency[def->_idx] = l;

    // If this does not have uses then schedule it
    if ((--_uses[def->_idx]) == 0)
      AddNodeToAvailableList(def);
  }
}

//------------------------------AddNodeToBundle--------------------------------
void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
#ifndef PRODUCT
  if (_cfg->C->trace_opto_output()) {
    tty->print("# AddNodeToBundle: ");
    n->dump();
  }
#endif

  // Remove this from the available list
  uint i;
  for (i = 0; i < _available.size(); i++)
    if (_available[i] == n)
      break;
  assert(i < _available.size(), "entry in _available list not found");
  _available.remove(i);

  // See if this fits in the current bundle
  const Pipeline *node_pipeline = n->pipeline();
  const Pipeline_Use& node_usage = node_pipeline->resourceUse();

  // Check for instructions to be placed in the delay slot.  We
  // do this before we actually schedule the current instruction,
  // because the delay slot follows the current instruction.
  if (Pipeline::_branch_has_delay_slot &&
      node_pipeline->hasBranchDelay() &&
      !_unconditional_delay_slot) {

    uint siz = _available.size();

    // Conditional branches can support an instruction that
    // is unconditionally executed and not dependent on the
    // branch, OR a conditionally executed instruction if
    // the branch is taken.  In practice, this means that
    // the first instruction at the branch target is
    // copied to the delay slot, and the branch goes to
    // the instruction after that at the branch target.
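    // (e.g. on a SPARC-style pipeline, "bne L; nop": the search below
    // tries to replace that nop with a useful, branch-independent,
    // single-unit instruction from the available list.)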
    if ( n->is_Mach() && n->is_Branch() ) {

      assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
      assert( !n->is_Catch(),         "should not look for delay slot for Catch" );

#ifndef PRODUCT
      _branches++;
#endif

      // At least 1 instruction is on the available list
      // that is not dependent on the branch
      for (uint i = 0; i < siz; i++) {
        Node *d = _available[i];
        const Pipeline *avail_pipeline = d->pipeline();

        // Don't allow safepoints in the branch shadow, that will
        // cause a number of difficulties
        if ( avail_pipeline->instructionCount() == 1 &&
             !avail_pipeline->hasMultipleBundles() &&
             !avail_pipeline->hasBranchDelay() &&
             Pipeline::instr_has_unit_size() &&
             d->size(_regalloc) == Pipeline::instr_unit_size() &&
             NodeFitsInBundle(d) &&
             !node_bundling(d)->used_in_delay()) {

          if (d->is_Mach() && !d->is_MachSafePoint()) {
            // A node that fits in the delay slot was found, so we need to
            // set the appropriate bits in the bundle pipeline information so
            // that it correctly indicates resource usage.  Later, when we
            // attempt to add this instruction to the bundle, we will skip
            // setting the resource usage.
            _unconditional_delay_slot = d;
            node_bundling(n)->set_use_unconditional_delay();
            node_bundling(d)->set_used_in_unconditional_delay();
            _bundle_use.add_usage(avail_pipeline->resourceUse());
            _current_latency[d->_idx] = _bundle_cycle_number;
            _next_node = d;
            ++_bundle_instr_count;
#ifndef PRODUCT
            _unconditional_delays++;
#endif
            break;
          }
        }
      }
    }

    // No delay slot, add a nop to the usage
    if (!_unconditional_delay_slot) {
      // See if adding an instruction in the delay slot will overflow
      // the bundle.
      if (!NodeFitsInBundle(_nop)) {
#ifndef PRODUCT
        if (_cfg->C->trace_opto_output())
          tty->print("# *** STEP(1 instruction for delay slot) ***\n");
#endif
        step(1);
      }

      _bundle_use.add_usage(_nop->pipeline()->resourceUse());
      _next_node = _nop;
      ++_bundle_instr_count;
    }

    // See if the instruction in the delay slot requires a
    // step of the bundles
    if (!NodeFitsInBundle(n)) {
#ifndef PRODUCT
      if (_cfg->C->trace_opto_output())
        tty->print("# *** STEP(branch won't fit) ***\n");
#endif
      // Update the state information
      _bundle_instr_count = 0;
      _bundle_cycle_number += 1;
      _bundle_use.step(1);
    }
  }

  // Get the number of instructions
  uint instruction_count = node_pipeline->instructionCount();
  if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
    instruction_count = 0;

  // Compute the latency information
  uint delay = 0;

  if (instruction_count > 0 || !node_pipeline->mayHaveNoCode()) {
    int relative_latency = _current_latency[n->_idx] - _bundle_cycle_number;
    if (relative_latency < 0)
      relative_latency = 0;

    delay = _bundle_use.full_latency(relative_latency, node_usage);

    // Does not fit in this bundle, start a new one
    if (delay > 0) {
      step(delay);

#ifndef PRODUCT
      if (_cfg->C->trace_opto_output())
        tty->print("# *** STEP(%d) ***\n", delay);
#endif
    }
  }

  // If this was placed in the delay slot, ignore it
  if (n != _unconditional_delay_slot) {

    if (delay == 0) {
      if (node_pipeline->hasMultipleBundles()) {
#ifndef PRODUCT
        if (_cfg->C->trace_opto_output())
          tty->print("# *** STEP(multiple instructions) ***\n");
#endif
        step(1);
      }

      else if (instruction_count + _bundle_instr_count > Pipeline::_max_instrs_per_cycle) {
#ifndef PRODUCT
        if (_cfg->C->trace_opto_output())
          tty->print("# *** STEP(%d >= %d instructions) ***\n",
            instruction_count + _bundle_instr_count,
            Pipeline::_max_instrs_per_cycle);
#endif
        step(1);
      }
    }

    if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
      _bundle_instr_count++;

    // Set the node's latency
    _current_latency[n->_idx] = _bundle_cycle_number;

    // Now merge the functional unit information
    if (instruction_count > 0 || !node_pipeline->mayHaveNoCode())
      _bundle_use.add_usage(node_usage);

    // Increment the number of instructions in this bundle
    _bundle_instr_count += instruction_count;

    // Remember this node for later
    if (n->is_Mach())
      _next_node = n;
  }

  // It's possible to have a BoxLock in the graph and in the _bbs mapping but
  // not in the bb->_nodes array.  This happens for debug-info-only BoxLocks.
  // 'Schedule' them (basically ignore in the schedule) but do not insert them
  // into the block.  All other scheduled nodes get put in the schedule here.
  int op = n->Opcode();
  if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
      (op != Op_Node &&                   // Not an unused antidependence node and
                                          // not an unallocated boxlock
       (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {

    // Push any trailing projections
    if( bb->_nodes[bb->_nodes.size()-1] != n ) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node *foi = n->fast_out(i);
        if( foi->is_Proj() )
          _scheduled.push(foi);
      }
    }

    // Put the instruction in the schedule list
    _scheduled.push(n);
  }

#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    dump_available();
#endif

  // Walk all the definitions, decrementing use counts, and
  // if a definition has a 0 use count, place it in the available list.
  DecrementUseCounts(n,bb);
}

//------------------------------ComputeUseCount--------------------------------
// This method sets the use count within a basic block.  We will ignore all
// uses outside the current basic block.  As we are doing a backwards walk,
// any node we reach that has a use count of 0 may be scheduled.  This also
// avoids the problem of cyclic references from phi nodes, as long as phi
// nodes are at the front of the basic block.  This method also initializes
// the available list to the set of instructions that have no uses within this
// basic block.
void Scheduling::ComputeUseCount(const Block *bb) {
#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    tty->print("# -> ComputeUseCount\n");
#endif

  // Clear the list of available and scheduled instructions, just in case
  _available.clear();
  _scheduled.clear();

  // No delay slot specified
  _unconditional_delay_slot = NULL;

#ifdef ASSERT
  for( uint i=0; i < bb->_nodes.size(); i++ )
    assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
#endif

  // Force the _uses count to never go to zero for unschedulable pieces
  // of the block
  for( uint k = 0; k < _bb_start; k++ )
    _uses[bb->_nodes[k]->_idx] = 1;
  for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
    _uses[bb->_nodes[l]->_idx] = 1;

  // Iterate backwards over the instructions in the block.  Don't count the
  // branch projections at end or the block header instructions.
  for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
    Node *n = bb->_nodes[j];
    if( n->is_Proj() ) continue; // Projections handled another way

    // Account for all uses
    for ( uint k = 0; k < n->len(); k++ ) {
      Node *inp = n->in(k);
      if (!inp) continue;
      assert(inp != n, "no cycles allowed" );
      if( _bbs[inp->_idx] == bb ) { // Block-local use?
        if( inp->is_Proj() )        // Skip through Proj's
          inp = inp->in(0);
        ++_uses[inp->_idx];         // Count 1 block-local use
      }
    }

    // If this instruction has a 0 use count, then it is available
    if (!_uses[n->_idx]) {
      _current_latency[n->_idx] = _bundle_cycle_number;
      AddNodeToAvailableList(n);
    }

#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) {
      tty->print("# uses: %3d: ", _uses[n->_idx]);
      n->dump();
    }
#endif
  }

#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    tty->print("# <- ComputeUseCount\n");
#endif
}

// This routine performs scheduling on each basic block in reverse order,
// using instruction latencies and taking into account functional unit
// availability.
void Scheduling::DoScheduling() {
#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    tty->print("# -> DoScheduling\n");
#endif

  Block *succ_bb = NULL;
  Block *bb;

  // Walk over all the basic blocks in reverse order
  for( int i=_cfg->_num_blocks-1; i >= 0; succ_bb = bb, i-- ) {
    bb = _cfg->_blocks[i];

#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) {
      tty->print("# Schedule BB#%03d (initial)\n", i);
      for (uint j = 0; j < bb->_nodes.size(); j++)
        bb->_nodes[j]->dump();
    }
#endif

    // On the head node, skip processing
    if( bb == _cfg->_broot )
      continue;

    // Skip empty, connector blocks
    if (bb->is_connector())
      continue;

    // If the following block is not the sole successor of
    // this one, then reset the pipeline information
    if (bb->_num_succs != 1 || bb->non_connector_successor(0) != succ_bb) {
#ifndef PRODUCT
      if (_cfg->C->trace_opto_output()) {
        tty->print("*** bundle start of next BB, node %d, for %d instructions\n",
                   _next_node->_idx, _bundle_instr_count);
      }
#endif
      step_and_clear();
    }

    // Leave untouched the starting instruction, any Phis, a CreateEx node
    // or Top.  bb->_nodes[_bb_start] is the first schedulable instruction.
    _bb_end = bb->_nodes.size()-1;
    for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
      Node *n = bb->_nodes[_bb_start];
      // Things not matched, like PhiNodes and ProjNodes don't get scheduled.
      // Also, MachIdealNodes do not get scheduled
      if( !n->is_Mach() ) continue;     // Skip non-machine nodes
      MachNode *mach = n->as_Mach();
      int iop = mach->ideal_Opcode();
      if( iop == Op_CreateEx ) continue; // CreateEx is pinned
      if( iop == Op_Con ) continue;      // Do not schedule Top
      if( iop == Op_Node &&     // Do not schedule PhiNodes, ProjNodes
          mach->pipeline() == MachNode::pipeline_class() &&
          !n->is_SpillCopy() )  // Breakpoints, Prolog, etc
        continue;
      break;                    // Funny loop structure to be sure...
    }
    // Compute last "interesting" instruction in block - last instruction we
    // might schedule.  _bb_end points just after last schedulable inst.  We
    // normally schedule conditional branches (despite them being forced last
    // in the block), because they have delay slots we can fill.  Calls all
    // have their delay slots filled in the template expansions, so we don't
    // bother scheduling them.
    Node *last = bb->_nodes[_bb_end];
    if( last->is_Catch() ||
        (last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
      // There must be a prior call.  Skip it.
      while( !bb->_nodes[--_bb_end]->is_Call() ) {
        assert( bb->_nodes[_bb_end]->is_Proj(), "skipping projections after expected call" );
      }
    } else if( last->is_MachNullCheck() ) {
      // Backup so the last null-checked memory instruction is
      // outside the schedulable range.  Skip over the nullcheck,
      // projection, and the memory nodes.
      Node *mem = last->in(1);
      do {
        _bb_end--;
      } while (mem != bb->_nodes[_bb_end]);
    } else {
      // Set _bb_end to point after last schedulable inst.
      _bb_end++;
    }

    assert( _bb_start <= _bb_end, "inverted block ends" );

    // Compute the register antidependencies for the basic block
    ComputeRegisterAntidependencies(bb);
    if (_cfg->C->failing())  return;  // too many D-U pinch points

    // Compute intra-bb latencies for the nodes
    ComputeLocalLatenciesForward(bb);

    // Compute the usage within the block, and set the list of all nodes
    // in the block that have no uses within the block.
    ComputeUseCount(bb);

    // Schedule the remaining instructions in the block
    while ( _available.size() > 0 ) {
      Node *n = ChooseNodeToBundle();
      AddNodeToBundle(n,bb);
    }

    assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
#ifdef ASSERT
    for( uint l = _bb_start; l < _bb_end; l++ ) {
      Node *n = bb->_nodes[l];
      uint m;
      for( m = 0; m < _bb_end-_bb_start; m++ )
        if( _scheduled[m] == n )
          break;
      assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
    }
#endif

    // Now copy the instructions (in reverse order) back to the block
    for ( uint k = _bb_start; k < _bb_end; k++ )
      bb->_nodes.map(k, _scheduled[_bb_end-k-1]);

#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) {
      tty->print("# Schedule BB#%03d (final)\n", i);
      uint current = 0;
      for (uint j = 0; j < bb->_nodes.size(); j++) {
        Node *n = bb->_nodes[j];
        if( valid_bundle_info(n) ) {
          Bundle *bundle = node_bundling(n);
          if (bundle->instr_count() > 0 || bundle->flags() > 0) {
            tty->print("*** Bundle: ");
            bundle->dump();
          }
          n->dump();
        }
      }
    }
#endif
#ifdef ASSERT
    verify_good_schedule(bb,"after block local scheduling");
#endif
  }

#ifndef PRODUCT
  if (_cfg->C->trace_opto_output())
    tty->print("# <- DoScheduling\n");
#endif

  // Record final node-bundling array location
  _regalloc->C->set_node_bundling_base(_node_bundling_base);

} // end DoScheduling

//------------------------------verify_good_schedule---------------------------
// Verify that no live-range used in the block is killed in the block by a
// wrong DEF.  This doesn't verify live-ranges that span blocks.

// Check for edge existence.  Used to avoid adding redundant precedence edges.
static bool edge_from_to( Node *from, Node *to ) {
  for( uint i=0; i<from->len(); i++ )
    if( from->in(i) == to )
      return true;
  return false;
}

#ifdef ASSERT
//------------------------------verify_do_def----------------------------------
void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
  // Check for bad kills
  if( OptoReg::is_valid(def) ) { // Ignore stores & control flow
    Node *prior_use = _reg_node[def];
    if( prior_use && !edge_from_to(prior_use,n) ) {
      tty->print("%s = ",OptoReg::as_VMReg(def)->name());
      n->dump();
      tty->print_cr("...");
      prior_use->dump();
      assert_msg(edge_from_to(prior_use,n),msg);
    }
    _reg_node.map(def,NULL); // Kill live USEs
  }
}

//------------------------------verify_good_schedule---------------------------
void Scheduling::verify_good_schedule( Block *b, const char *msg ) {

  // Zap to something reasonable for the verify code
  _reg_node.clear();

  // Walk over the block backwards.  Check to make sure each DEF doesn't
  // kill a live value (other than the one it's supposed to).  Add each
  // USE to the live set.
  for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
    Node *n = b->_nodes[i];
    int n_op = n->Opcode();
    if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
      // Fat-proj kills a slew of registers
      RegMask rm = n->out_RegMask();// Make local copy
      while( rm.is_NotEmpty() ) {
        OptoReg::Name kill = rm.find_first_elem();
        rm.Remove(kill);
        verify_do_def( n, kill, msg );
      }
    } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
      // Get DEF'd registers the normal way
      verify_do_def( n, _regalloc->get_reg_first(n), msg );
      verify_do_def( n, _regalloc->get_reg_second(n), msg );
    }

    // Now make all USEs live
    for( uint i=1; i<n->req(); i++ ) {
      Node *def = n->in(i);
      assert(def != 0, "input edge required");
      OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
      OptoReg::Name reg_hi = _regalloc->get_reg_second(def);
      if( OptoReg::is_valid(reg_lo) ) {
        assert_msg(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg );
        _reg_node.map(reg_lo,n);
      }
      if( OptoReg::is_valid(reg_hi) ) {
        assert_msg(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg );
        _reg_node.map(reg_hi,n);
      }
    }

  }

  // Zap to something reasonable for the Antidependence code
  _reg_node.clear();
}
#endif

// Conditionally add precedence edges.  Avoid putting edges on Projs.
static void add_prec_edge_from_to( Node *from, Node *to ) {
  if( from->is_Proj() ) { // Put precedence edge on Proj's input
    assert( from->req() == 1 && (from->len() == 1 || from->in(1)==0), "no precedence edges on projections" );
    from = from->in(0);
  }
  if( from != to &&             // No cycles (for things like LD L0,[L0+4] )
      !edge_from_to( from, to ) ) // Avoid duplicate edge
    from->add_prec(to);
}

//------------------------------anti_do_def------------------------------------
void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
  if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
    return;

  Node *pinch = _reg_node[def_reg]; // Get pinch point
  if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
      is_def ) {                    // Check for a true def (not a kill)
    _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
    return;
  }

  Node *kill = def;             // Rename 'def' to more descriptive 'kill'
  debug_only( def = (Node*)0xdeadbeef; )

  // After some number of kills there _may_ be a later def
  Node *later_def = NULL;

  // Finding a kill requires a real pinch-point.
  // Check for not already having a pinch-point.
  // Pinch points are Op_Node's.
  if( pinch->Opcode() != Op_Node ) { // Or later-def/kill as pinch-point?
    later_def = pinch;            // Must be def/kill as optimistic pinch-point
    if ( _pinch_free_list.size() > 0) {
      pinch = _pinch_free_list.pop();
    } else {
      pinch = new (_cfg->C, 1) Node(1); // Pinch point to-be
    }
    if (pinch->_idx >= _regalloc->node_regs_max_index()) {
      _cfg->C->record_method_not_compilable("too many D-U pinch points");
      return;
    }
    _bbs.map(pinch->_idx,b);      // Pretend it's valid in this block (lazy init)
    _reg_node.map(def_reg,pinch); // Record pinch-point
    //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
    if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
      pinch->init_req(0, _cfg->C->top());     // set not NULL for the next call
      add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
      later_def = NULL;           // and no later def
    }
    pinch->set_req(0,later_def);  // Hook later def so we can find it
  } else {                        // Else have valid pinch point
    if( pinch->in(0) )            // If there is a later-def
      later_def = pinch->in(0);   // Get it
  }

  // Add output-dependence edge from later def to kill
  if( later_def )               // If there is some original def
    add_prec_edge_from_to(later_def,kill); // Add edge from def to kill

  // See if current kill is also a use, and so is forced to be the pinch-point.
  if( pinch->Opcode() == Op_Node ) {
    Node *uses = kill->is_Proj() ? kill->in(0) : kill;
    for( uint i=1; i<uses->req(); i++ ) {
      if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
          _regalloc->get_reg_second(uses->in(i)) == def_reg ) {
        // Yes, found a use/kill pinch-point
        pinch->set_req(0,NULL);  //
        pinch->replace_by(kill); // Move anti-dep edges up
        pinch = kill;
        _reg_node.map(def_reg,pinch);
        return;
      }
    }
  }

  // Add edge from kill to pinch-point
  add_prec_edge_from_to(kill,pinch);
}

//------------------------------anti_do_use------------------------------------
void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
  if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
    return;
  Node *pinch = _reg_node[use_reg]; // Get pinch point
  // Check for no later def_reg/kill in block
  if( pinch && _bbs[pinch->_idx] == b &&
      // Use has to be block-local as well
      _bbs[use->_idx] == b ) {
    if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
        pinch->req() == 1 ) {   // pinch not yet in block?
      pinch->del_req(0);        // yank pointer to later-def, also set flag
      // Insert the pinch-point in the block just after the last use
      b->_nodes.insert(b->find_node(use)+1,pinch);
      _bb_end++;                // Increase size scheduled region in block
    }

    add_prec_edge_from_to(pinch,use);
  }
}

//------------------------------ComputeRegisterAntidependences-----------------
// We insert antidependences between the reads and following write of
// allocated registers to prevent illegal code motion.  Hopefully, the
// number of added references should be fairly small, especially as we
// are only adding references within the current basic block.
void Scheduling::ComputeRegisterAntidependencies(Block *b) {

#ifdef ASSERT
  verify_good_schedule(b,"before block local scheduling");
#endif

  // A valid schedule, for each register independently, is an endless cycle
  // of: a def, then some uses (connected to the def by true dependencies),
  // then some kills (defs with no uses), finally the cycle repeats with a new
  // def.  The uses are allowed to float relative to each other, as are the
  // kills.  No use is allowed to slide past a kill (or def).  This requires
  // antidependencies between all uses of a single def and all kills that
  // follow, up to the next def.  More edges are redundant, because later defs
  // & kills are already serialized with true or antidependencies.  To keep
  // the edge count down, we add a 'pinch point' node if there's more than
  // one use or more than one kill/def.

  // We add dependencies in one bottom-up pass.

  // For each instruction we handle its DEFs/KILLs, then its USEs.

  // For each DEF/KILL, we check to see if there's a prior DEF/KILL for this
  // register.  If not, we record the DEF/KILL in _reg_node, the
  // register-to-def mapping.  If there is a prior DEF/KILL, we insert a
  // "pinch point", a new Node that's in the graph but not in the block.
  // We put edges from the prior and current DEF/KILLs to the pinch point.
  // We put the pinch point in _reg_node.  If there's already a pinch point
  // we merely add an edge from the current DEF/KILL to the pinch point.

  // After doing the DEF/KILLs, we handle USEs.  For each used register, we
  // put an edge from the pinch point to the USE.

  // To be expedient, the _reg_node array is pre-allocated for the whole
  // compilation.  _reg_node is lazily initialized; it either contains a NULL,
  // or a valid def/kill/pinch-point, or a leftover node from some prior
  // block.  Leftover node from some prior block is treated like a NULL (no
  // prior def, so no anti-dependence needed).  Valid def is distinguished by
  // it being in the current block.
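  // (Micro-example for one register R: given "R = def; use1(R); use2(R);
  // R = kill", the bottom-up walk sees the kill first and records it as the
  // optimistic pinch point; when the uses appear, a real pinch node is
  // inserted so that use1/use2 -> pinch -> kill serializes both uses before
  // the kill with O(uses + kills) edges instead of O(uses * kills).)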
  bool fat_proj_seen = false;
  uint last_safept = _bb_end-1;
  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
  Node* last_safept_node = end_node;
  for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
    Node *n = b->_nodes[i];
    int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
    if( n->Opcode() == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
      // Fat-proj kills a slew of registers
      // This can add edges to 'n' and obscure whether or not it was a def,
      // hence the is_def flag.
      fat_proj_seen = true;
      RegMask rm = n->out_RegMask();// Make local copy
      while( rm.is_NotEmpty() ) {
        OptoReg::Name kill = rm.find_first_elem();
        rm.Remove(kill);
        anti_do_def( b, n, kill, is_def );
      }
    } else {
      // Get DEF'd registers the normal way
      anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
      anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
    }

    // Check each register used by this instruction for a following DEF/KILL
    // that must occur afterward and requires an anti-dependence edge.
    for( uint j=0; j<n->req(); j++ ) {
      Node *def = n->in(j);
      if( def ) {
        assert( def->Opcode() != Op_MachProj || def->ideal_reg() != MachProjNode::fat_proj, "" );
        anti_do_use( b, n, _regalloc->get_reg_first(def) );
        anti_do_use( b, n, _regalloc->get_reg_second(def) );
      }
    }
    // Do not allow defs of new derived values to float above GC
    // points unless the base is definitely available at the GC point.

    Node *m = b->_nodes[i];

    // Add precedence edge from following safepoint to use of derived pointer
    if( last_safept_node != end_node &&
        m != last_safept_node) {
      for (uint k = 1; k < m->req(); k++) {
        const Type *t = m->in(k)->bottom_type();
        if( t->isa_oop_ptr() &&
            t->is_ptr()->offset() != 0 ) {
          last_safept_node->add_prec( m );
          break;
        }
      }
    }

    if( n->jvms() ) {           // Precedence edge from derived to safept
      // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
      if( b->_nodes[last_safept] != last_safept_node ) {
        last_safept = b->find_node(last_safept_node);
      }
      for( uint j=last_safept; j > i; j-- ) {
        Node *mach = b->_nodes[j];
        if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
          mach->add_prec( n );
      }
      last_safept = i;
      last_safept_node = m;
    }
  }

  if (fat_proj_seen) {
    // Garbage collect pinch nodes that were not consumed.
    // They are usually created by a fat kill MachProj for a call.
    garbage_collect_pinch_nodes();
  }
}

//------------------------------garbage_collect_pinch_nodes-------------------------------

// Garbage collect pinch nodes for reuse by other blocks.
//
// The block scheduler's insertion of anti-dependence
// edges creates many pinch nodes when the block contains
// 2 or more Calls.  A pinch node is used to prevent a
// combinatorial explosion of edges.  If a set of kills for a
// register is anti-dependent on a set of uses (or defs), rather
// than adding an edge in the graph between each pair of kill
// and use (or def), a pinch is inserted between them:
//
//  use1   use2   use3
//     \    |    /
//      \   |   /
//        pinch
//      /   |   \
//     /    |    \
//  kill1  kill2  kill3
//
// One pinch node is created per register killed when
// the second call is encountered during a backwards pass
// over the block.  Most of these pinch nodes are never
// wired into the graph because the register is never
// used or def'ed in the block.
//
void Scheduling::garbage_collect_pinch_nodes() {
#ifndef PRODUCT
  if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
#endif
  int trace_cnt = 0;
  for (uint k = 0; k < _reg_node.Size(); k++) {
    Node* pinch = _reg_node[k];
    if (pinch != NULL && pinch->Opcode() == Op_Node &&
        // no precedence input edges
        (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
      cleanup_pinch(pinch);
      _pinch_free_list.push(pinch);
      _reg_node.map(k, NULL);
#ifndef PRODUCT
      if (_cfg->C->trace_opto_output()) {
        trace_cnt++;
        if (trace_cnt > 40) {
          tty->print("\n");
          trace_cnt = 0;
        }
        tty->print(" %d", pinch->_idx);
      }
#endif
    }
  }
#ifndef PRODUCT
  if (_cfg->C->trace_opto_output()) tty->print("\n");
#endif
}

// Clean up a pinch node for reuse.
void Scheduling::cleanup_pinch( Node *pinch ) {
  assert (pinch && pinch->Opcode() == Op_Node && pinch->req() == 1, "just checking");

  for (DUIterator_Last imin, i = pinch->last_outs(imin); i >= imin; ) {
    Node* use = pinch->last_out(i);
    uint uses_found = 0;
    for (uint j = use->req(); j < use->len(); j++) {
      if (use->in(j) == pinch) {
        use->rm_prec(j);
        uses_found++;
      }
    }
    assert(uses_found > 0, "must be a precedence edge");
    i -= uses_found;            // we deleted 1 or more copies of this edge
  }
  // May have a later_def entry
  pinch->set_req(0, NULL);
}

//------------------------------print_statistics-------------------------------
#ifndef PRODUCT

void Scheduling::dump_available() const {
  tty->print("#Availist ");
  for (uint i = 0; i < _available.size(); i++)
    tty->print(" N%d/l%d", _available[i]->_idx,_current_latency[_available[i]->_idx]);
  tty->cr();
}

// Print Scheduling Statistics
void Scheduling::print_statistics() {
  // Print the size added by nops for bundling
  tty->print("Nops added %d bytes to total of %d bytes",
    _total_nop_size, _total_method_size);
  if (_total_method_size > 0)
    tty->print(", for %.2f%%",
      ((double)_total_nop_size) / ((double) _total_method_size) * 100.0);
  tty->print("\n");

  // Print the number of branch shadows filled
  if (Pipeline::_branch_has_delay_slot) {
    tty->print("Of %d branches, %d had unconditional delay slots filled",
      _total_branches, _total_unconditional_delays);
    if (_total_branches > 0)
      tty->print(", for %.2f%%",
        ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0);
    tty->print("\n");
  }

  uint total_instructions = 0, total_bundles = 0;

  for (uint i = 1; i <= Pipeline::_max_instrs_per_cycle; i++) {
    uint bundle_count   = _total_instructions_per_bundle[i];
    total_instructions += bundle_count * i;
    total_bundles      += bundle_count;
  }
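
  // (e.g. 25 instructions spread over 10 bundles gives an average ILP of 2.5.)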

  if (total_bundles > 0)
    tty->print("Average ILP (excluding nops) is %.2f\n",
      ((double)total_instructions) / ((double)total_bundles));
}
#endif