/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_compile.cpp.incl"

/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
#ifdef ASSERT
  for (int i = 1; i < _intrinsics->length(); i++) {
    CallGenerator* cg1 = _intrinsics->at(i-1);
    CallGenerator* cg2 = _intrinsics->at(i);
    assert(cg1->method() != cg2->method()
           ? cg1->method()     < cg2->method()
           : cg1->is_virtual() < cg2->is_virtual(),
           "compiler intrinsics list must stay sorted");
  }
#endif
  // Binary search sorted list, in decreasing intervals [lo, hi].
  int lo = 0, hi = _intrinsics->length()-1;
  while (lo <= hi) {
    int mid = (uint)(hi + lo) / 2;
    ciMethod* mid_m = _intrinsics->at(mid)->method();
    if (m < mid_m) {
      hi = mid-1;
    } else if (m > mid_m) {
      lo = mid+1;
    } else {
      // look at minor sort key
      bool mid_virt = _intrinsics->at(mid)->is_virtual();
      if (is_virtual < mid_virt) {
        hi = mid-1;
      } else if (is_virtual > mid_virt) {
        lo = mid+1;
      } else {
        return mid;  // exact match
      }
    }
  }
  return lo;  // inexact match
}
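
// Illustrative note (not in the original source): the list is kept sorted by
// ciMethod* address first and is_virtual second (false before true), e.g.
//   [ (m1, non-virtual), (m1, virtual), (m2, non-virtual) ]   with m1 < m2.
// Looking up (m1, virtual) binary-searches to index 1; looking up a pair that
// is absent returns the index where register_intrinsic() would insert it.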

void Compile::register_intrinsic(CallGenerator* cg) {
  if (_intrinsics == NULL) {
    _intrinsics = new GrowableArray<CallGenerator*>(60);
  }
  // This code is stolen from ciObjectFactory::insert.
  // Really, GrowableArray should have methods for
  // insert_at, remove_at, and binary_search.
  int len = _intrinsics->length();
  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
  if (index == len) {
    _intrinsics->append(cg);
  } else {
#ifdef ASSERT
    CallGenerator* oldcg = _intrinsics->at(index);
    assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
#endif
    _intrinsics->append(_intrinsics->at(len-1));
    int pos;
    for (pos = len-2; pos >= index; pos--) {
      _intrinsics->at_put(pos+1, _intrinsics->at(pos));
    }
    _intrinsics->at_put(index, cg);
  }
  assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
}

CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
  assert(m->is_loaded(), "don't try this on unloaded methods");
  if (_intrinsics != NULL) {
    int index = intrinsic_insertion_index(m, is_virtual);
    if (index < _intrinsics->length()
        && _intrinsics->at(index)->method() == m
        && _intrinsics->at(index)->is_virtual() == is_virtual) {
      return _intrinsics->at(index);
    }
  }
  // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
  if (m->intrinsic_id() != vmIntrinsics::_none) {
    CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
    if (cg != NULL) {
      // Save it for next time:
      register_intrinsic(cg);
      return cg;
    } else {
      gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
    }
  }
  return NULL;
}

// Compile::register_library_intrinsics and make_vm_intrinsic are defined
// in library_call.cpp.

#ifndef PRODUCT
// statistics gathering...

juint  Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0};
jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0};

bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
  assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
  int oflags = _intrinsic_hist_flags[id];
  assert(flags != 0, "what happened?");
  if (is_virtual) {
    flags |= _intrinsic_virtual;
  }
  bool changed = (flags != oflags);
  if ((flags & _intrinsic_worked) != 0) {
    juint count = (_intrinsic_hist_count[id] += 1);
    if (count == 1) {
      changed = true;  // first time
    }
    // increment the overall count also:
    _intrinsic_hist_count[vmIntrinsics::_none] += 1;
  }
  if (changed) {
    if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
      // Something changed about the intrinsic's virtuality.
      if ((flags & _intrinsic_virtual) != 0) {
        // This is the first use of this intrinsic as a virtual call.
        if (oflags != 0) {
          // We already saw it as a non-virtual, so note both cases.
          flags |= _intrinsic_both;
        }
      } else if ((oflags & _intrinsic_both) == 0) {
        // This is the first use of this intrinsic as a non-virtual
        flags |= _intrinsic_both;
      }
    }
    _intrinsic_hist_flags[id] = (jubyte) (oflags | flags);
  }
  // update the overall flags also:
  _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags;
  return changed;
}
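
// Illustrative walk-through (assumed example, not from the source): suppose
// some intrinsic is first used as a non-virtual call that worked; its
// histogram flags become {worked}. If a later use is virtual, the XOR test
// above notices the change in virtuality, _intrinsic_both is ORed in, and
// the accumulated flags become {worked, virtual, both} -- which
// format_flags() below renders as "worked,virtual,nonvirtual".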

static char* format_flags(int flags, char* buf) {
  buf[0] = 0;
  if ((flags & Compile::_intrinsic_worked) != 0)    strcat(buf, ",worked");
  if ((flags & Compile::_intrinsic_failed) != 0)    strcat(buf, ",failed");
  if ((flags & Compile::_intrinsic_disabled) != 0)  strcat(buf, ",disabled");
  if ((flags & Compile::_intrinsic_virtual) != 0)   strcat(buf, ",virtual");
  if ((flags & Compile::_intrinsic_both) != 0)      strcat(buf, ",nonvirtual");
  if (buf[0] == 0)  strcat(buf, ",");
  assert(buf[0] == ',', "must be");
  return &buf[1];
}

void Compile::print_intrinsic_statistics() {
  char flagsbuf[100];
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='intrinsic'");
  tty->print_cr("Compiler intrinsic usage:");
  juint total = _intrinsic_hist_count[vmIntrinsics::_none];
  if (total == 0)  total = 1;  // avoid div0 in case of no successes
  #define PRINT_STAT_LINE(name, c, f) \
    tty->print_cr(" %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
  for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) {
    vmIntrinsics::ID id = (vmIntrinsics::ID) index;
    int   flags = _intrinsic_hist_flags[id];
    juint count = _intrinsic_hist_count[id];
    if ((flags | count) != 0) {
      PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
    }
  }
  PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
  if (xtty != NULL)  xtty->tail("statistics");
}

void Compile::print_statistics() {
  { ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='opto'");
    Parse::print_statistics();
    PhaseCCP::print_statistics();
    PhaseRegAlloc::print_statistics();
    Scheduling::print_statistics();
    PhasePeephole::print_statistics();
    PhaseIdealLoop::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
  if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) {
    // put this under its own <statistics> element.
    print_intrinsic_statistics();
  }
}
#endif //PRODUCT

// Support for bundling info
Bundle* Compile::node_bundling(const Node *n) {
  assert(valid_bundle_info(n), "oob");
  return &_node_bundling_base[n->_idx];
}

bool Compile::valid_bundle_info(const Node *n) {
  return (_node_bundling_limit > n->_idx);
}


// Identify all nodes that are reachable from below, useful.
// Use a breadth-first pass that records state in a Unique_Node_List;
// recursive traversal is slower.
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
  int estimated_worklist_size = unique();
  useful.map( estimated_worklist_size, NULL );  // preallocate space

  // Initialize worklist
  if (root() != NULL)     { useful.push(root()); }
  // If 'top' is cached, declare it useful to preserve cached node
  if( cached_top_node() ) { useful.push(cached_top_node()); }

  // Push all useful nodes onto the list, breadth-first
  for( uint next = 0; next < useful.size(); ++next ) {
    assert( next < unique(), "Unique useful nodes < total nodes");
    Node *n  = useful.at(next);
    uint max = n->len();
    for( uint i = 0; i < max; ++i ) {
      Node *m = n->in(i);
      if( m == NULL ) continue;
      useful.push(m);
    }
  }
}
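
// Note (illustrative, relying on Unique_Node_List semantics): push() ignores
// nodes already on the list, so the loop above visits each reachable node
// exactly once even though shared inputs get pushed many times; 'next'
// chasing the growing tail of the list is what makes this an iterative
// breadth-first walk.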

// Disconnect all useless nodes by disconnecting those at the boundary.
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
  uint next = 0;
  while( next < useful.size() ) {
    Node *n = useful.at(next++);
    // Use raw traversal of out edges since this code removes out edges
    int max = n->outcnt();
    for (int j = 0; j < max; ++j ) {
      Node* child = n->raw_out(j);
      if( ! useful.member(child) ) {
        assert( !child->is_top() || child != top(),
                "If top is cached in Compile object it is in useful list");
        // Only need to remove this out-edge to the useless node
        n->raw_del_out(j);
        --j;
        --max;
      }
    }
    if (n->outcnt() == 1 && n->has_special_unique_user()) {
      record_for_igvn( n->unique_out() );
    }
  }
  debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
}

//------------------------------frame_size_in_words-----------------------------
// frame_slots in units of words
int Compile::frame_size_in_words() const {
  // shift is 0 in LP32 and 1 in LP64
  const int shift = (LogBytesPerWord - LogBytesPerInt);
  int words = _frame_slots >> shift;
  assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
  return words;
}
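
// Worked example (illustrative): on LP64, LogBytesPerWord == 3 and
// LogBytesPerInt == 2, so shift == 1 and a frame of 14 32-bit slots is
// 7 words; the assert rejects an odd slot count, which would mean the frame
// is not word-aligned. On LP32 the shift is 0 and slots equal words.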

// ============================================================================
//------------------------------CompileWrapper---------------------------------
class CompileWrapper : public StackObj {
  Compile *const _compile;
 public:
  CompileWrapper(Compile* compile);

  ~CompileWrapper();
};

CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
  // the Compile* pointer is stored in the current ciEnv:
  ciEnv* env = compile->env();
  assert(env == ciEnv::current(), "must already be a ciEnv active");
  assert(env->compiler_data() == NULL, "compile already active?");
  env->set_compiler_data(compile);
  assert(compile == Compile::current(), "sanity");

  compile->set_type_dict(NULL);
  compile->set_type_hwm(NULL);
  compile->set_type_last_size(0);
  compile->set_last_tf(NULL, NULL);
  compile->set_indexSet_arena(NULL);
  compile->set_indexSet_free_block_list(NULL);
  compile->init_type_arena();
  Type::Initialize(compile);
  _compile->set_scratch_buffer_blob(NULL);
  _compile->begin_method();
}
CompileWrapper::~CompileWrapper() {
  _compile->end_method();
  if (_compile->scratch_buffer_blob() != NULL)
    BufferBlob::free(_compile->scratch_buffer_blob());
  _compile->env()->set_compiler_data(NULL);
}
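
// Note (illustrative): CompileWrapper is a StackObj RAII guard. Each Compile
// constructor declares one on entry, which publishes 'this' through the
// current ciEnv for the duration of the compile and guarantees the teardown
// above (end_method, freeing the scratch blob, detaching from the ciEnv)
// runs on every exit path, including the early returns taken after
// record_failure().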


//----------------------------print_compile_messages---------------------------
void Compile::print_compile_messages() {
#ifndef PRODUCT
  // Check if recompiling
  if (_subsume_loads == false && PrintOpto) {
    // Recompiling without allowing machine instructions to subsume loads
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without subsuming loads          **");
    tty->print_cr("*********************************************************");
  }
  if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
    // Recompiling without escape analysis
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without escape analysis          **");
    tty->print_cr("*********************************************************");
  }
  if (env()->break_at_compile()) {
    // Open the debugger when compiling this method.
    tty->print("### Breaking when compiling: ");
    method()->print_short_name();
    tty->cr();
    BREAKPOINT;
  }

  if( PrintOpto ) {
    if (is_osr_compilation()) {
      tty->print("[OSR]%3d", _compile_id);
    } else {
      tty->print("%3d", _compile_id);
    }
  }
#endif
}


void Compile::init_scratch_buffer_blob() {
  if( scratch_buffer_blob() != NULL )  return;

  // Construct a temporary CodeBuffer to have it construct a BufferBlob
  // Cache this BufferBlob for this compile.
  ResourceMark rm;
  int size = (MAX_inst_size + MAX_stubs_size + MAX_const_size);
  BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size);
  // Record the buffer blob for next time.
  set_scratch_buffer_blob(blob);
  // Have we run out of code space?
  if (scratch_buffer_blob() == NULL) {
    // Let CompileBroker disable further compilations.
    record_failure("Not enough space for scratch buffer in CodeCache");
    return;
  }

  // Initialize the relocation buffers
  relocInfo* locs_buf = (relocInfo*) blob->instructions_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}
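
// Layout sketch (illustrative): the scratch blob is one contiguous code
// space of MAX_inst_size + MAX_stubs_size + MAX_const_size bytes. Code is
// emitted from instructions_begin() upward, while the last MAX_locs_size
// relocInfo slots of that space are set aside for relocation records; their
// start address doubles as the end-of-code bound used by scratch_emit_size()
// below.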


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint Compile::scratch_emit_size(const Node* n) {
  // Emit into a trash buffer and count bytes emitted.
  // This is a pretty expensive way to compute a size,
  // but it works well enough if seldom used.
  // All common fixed-size instructions are given a size
  // method by the AD file.
  // Note that the scratch buffer blob and locs memory are
  // allocated at the beginning of the compile task, and
  // may be shared by several calls to scratch_emit_size.
  // The allocation of the scratch buffer blob is particularly
  // expensive, since it has to grab the code cache lock.
  BufferBlob* blob = this->scratch_buffer_blob();
  assert(blob != NULL, "Initialize BufferBlob at start");
  assert(blob->size() > MAX_inst_size, "sanity");
  relocInfo* locs_buf = scratch_locs_memory();
  address blob_begin = blob->instructions_begin();
  address blob_end   = (address)locs_buf;
  assert(blob->instructions_contains(blob_end), "sanity");
  CodeBuffer buf(blob_begin, blob_end - blob_begin);
  buf.initialize_consts_size(MAX_const_size);
  buf.initialize_stubs_size(MAX_stubs_size);
  assert(locs_buf != NULL, "sanity");
  int lsize = MAX_locs_size / 2;
  buf.insts()->initialize_shared_locs(&locs_buf[0],     lsize);
  buf.stubs()->initialize_shared_locs(&locs_buf[lsize], lsize);
  n->emit(buf, this->regalloc());
  return buf.code_size();
}


// ============================================================================
//------------------------------Compile standard-------------------------------
debug_only( int Compile::_debug_idx = 100000; )

// Compile a method.  entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.

Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis )
                : Phase(Compiler),
                  _env(ci_env),
                  _log(ci_env->log()),
                  _compile_id(ci_env->compile_id()),
                  _save_argument_registers(false),
                  _stub_name(NULL),
                  _stub_function(NULL),
                  _stub_entry_point(NULL),
                  _method(target),
                  _entry_bci(osr_bci),
                  _initial_gvn(NULL),
                  _for_igvn(NULL),
                  _warm_calls(NULL),
                  _subsume_loads(subsume_loads),
                  _do_escape_analysis(do_escape_analysis),
                  _failure_reason(NULL),
                  _code_buffer("Compile::Fill_buffer"),
                  _orig_pc_slot(0),
                  _orig_pc_slot_offset_in_bytes(0),
                  _node_bundling_limit(0),
                  _node_bundling_base(NULL),
#ifndef PRODUCT
                  _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                  _printer(IdealGraphPrinter::printer()),
#endif
                  _congraph(NULL) {
  C = this;

  CompileWrapper cw(this);
#ifndef PRODUCT
  if (TimeCompiler2) {
    tty->print(" ");
    target->holder()->name()->print();
    tty->print(".");
    target->print_short_name();
    tty->print(" ");
  }
  TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
  TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
  bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
  if (!print_opto_assembly) {
    bool print_assembly = (PrintAssembly || _method->should_print_assembly());
    if (print_assembly && !Disassembler::can_decode()) {
      tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
      print_opto_assembly = true;
    }
  }
  set_print_assembly(print_opto_assembly);
  set_parsed_irreducible_loop(false);
#endif

  if (ProfileTraps) {
    // Make sure the method being compiled gets its own MDO,
    // so we can at least track the decompile_count().
    method()->build_method_data();
  }

  Init(::AliasLevel);


  print_compile_messages();

  if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) )
    _ilt = InlineTree::build_inline_tree_root();
  else
    _ilt = NULL;

  // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
  assert(num_alias_types() >= AliasIdxRaw, "");

#define MINIMUM_NODE_HASH  1023
  // Node list that Iterative GVN will start with
  Unique_Node_List for_igvn(comp_arena());
  set_for_igvn(&for_igvn);

  // GVN that will be run immediately on new nodes
  uint estimated_size = method()->code_size()*4+64;
  estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
  PhaseGVN gvn(node_arena(), estimated_size);
  set_initial_gvn(&gvn);

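  // Sizing note (illustrative): the hash table above is sized at
  // code_size*4 + 64 and clamped below at MINIMUM_NODE_HASH (1023); e.g. a
  // hypothetical 200-byte method yields 864, which the clamp raises to 1023.
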
  { // Scope for timing the parser
    TracePhase t3("parse", &_t_parser, true);

    // Put top into the hash table ASAP.
    initial_gvn()->transform_no_reclaim(top());

    // Set up tf(), start(), and find a CallGenerator.
    CallGenerator* cg;
    if (is_osr_compilation()) {
      const TypeTuple *domain = StartOSRNode::osr_domain();
      const TypeTuple *range = TypeTuple::make_range(method()->signature());
      init_tf(TypeFunc::make(domain, range));
      StartNode* s = new (this, 2) StartOSRNode(root(), domain);
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
      StartNode* s = new (this, 2) StartNode(root(), tf()->domain());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      float past_uses = method()->interpreter_invocation_count();
      float expected_uses = past_uses;
      cg = CallGenerator::for_inline(method(), expected_uses);
    }
    if (failing())  return;
    if (cg == NULL) {
      record_method_not_compilable_all_tiers("cannot parse method");
      return;
    }
    JVMState* jvms = build_start_state(start(), tf());
    if ((jvms = cg->generate(jvms)) == NULL) {
      record_method_not_compilable("method parse failed");
      return;
    }
    GraphKit kit(jvms);

    if (!kit.stopped()) {
      // Accept return values, and transfer control we know not where.
      // This is done by a special, unique ReturnNode bound to root.
      return_values(kit.jvms());
    }

    if (kit.has_exceptions()) {
      // Any exceptions that escape from this call must be rethrown
      // to whatever caller is dynamically above us on the stack.
      // This is done by a special, unique RethrowNode bound to root.
      rethrow_exceptions(kit.transfer_exceptions_into_jvms());
    }

    print_method("Before RemoveUseless");

    // Remove clutter produced by parsing.
    if (!failing()) {
      ResourceMark rm;
      PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
    }
  }

  // Note:  Large methods are capped off in do_one_bytecode().
  if (failing())  return;

  // After parsing, node notes are no longer automagic.
  // They must be propagated by register_new_node_with_optimizer(),
  // clone(), or the like.
  set_default_node_notes(NULL);

  for (;;) {
    int successes = Inline_Warm();
    if (failing())  return;
    if (successes == 0)  break;
  }

  // Drain the list.
  Finish_Warm();
#ifndef PRODUCT
  if (_printer) {
    _printer->print_inlining(this);
  }
#endif

  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

  // Perform escape analysis
  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
    // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction.
    PhaseGVN* igvn = initial_gvn();
    Node* oop_null = igvn->zerocon(T_OBJECT);
    Node* noop_null = igvn->zerocon(T_NARROWOOP);

    _congraph = new(comp_arena()) ConnectionGraph(this);
    bool has_non_escaping_obj = _congraph->compute_escape();

#ifndef PRODUCT
    if (PrintEscapeAnalysis) {
      _congraph->dump();
    }
#endif
    // Cleanup.
    if (oop_null->outcnt() == 0)
      igvn->hash_delete(oop_null);
    if (noop_null->outcnt() == 0)
      igvn->hash_delete(noop_null);

    if (!has_non_escaping_obj) {
      _congraph = NULL;
    }

    if (failing())  return;
  }
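  // Note (illustrative): the two null constants are created up front so
  // ConnectionGraph construction never has to add them to the GVN hash table
  // mid-walk, and they are unhashed again above if they ended up unused. If
  // compute_escape() proved no object non-escaping, the graph is dropped
  // (_congraph = NULL) since it enables no later optimization.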
  // Now optimize
  Optimize();
  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

#ifndef PRODUCT
  if (PrintIdeal) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->head("ideal compile_id='%d'%s", compile_id(),
                 is_osr_compilation() ? " compile_kind='osr'" :
                 "");
    }
    root()->dump(9999);
    if (xtty != NULL) {
      xtty->tail("ideal");
    }
  }
#endif

  // Now that we know the size of all the monitors we can add a fixed slot
  // for the original deopt pc.

  _orig_pc_slot = fixed_slots();
  int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
  set_fixed_slots(next_slot);

  // Now generate code
  Code_Gen();
  if (failing())  return;

  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      record_method_not_compilable("+OptoNoExecute");  // Flag as failed
      return;
    }
    TracePhase t2("install_code", &_t_registerMethod, TimeCompiler);
#endif

    if (is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    env()->register_method(_method, _entry_bci,
                           &_code_offsets,
                           _orig_pc_slot_offset_in_bytes,
                           code_buffer(),
                           frame_size_in_words(), _oop_map_set,
                           &_handler_table, &_inc_table,
                           compiler,
                           env()->comp_level(),
                           true, /*has_debug_info*/
                           has_unsafe_access()
                           );
  }
}
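
// Worked example (illustrative): with sizeof(address) == 8 and
// VMRegImpl::stack_slot_size == 4, the original-deopt-pc reservation above
// adds two 32-bit stack slots on a 64-bit VM; a 32-bit VM adds one.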

//------------------------------Compile----------------------------------------
// Compile a runtime stub
Compile::Compile( ciEnv* ci_env,
                  TypeFunc_generator generator,
                  address stub_function,
                  const char *stub_name,
                  int is_fancy_jump,
                  bool pass_tls,
                  bool save_arg_registers,
                  bool return_pc )
  : Phase(Compiler),
    _env(ci_env),
    _log(ci_env->log()),
    _compile_id(-1),
    _save_argument_registers(save_arg_registers),
    _method(NULL),
    _stub_name(stub_name),
    _stub_function(stub_function),
    _stub_entry_point(NULL),
    _entry_bci(InvocationEntryBci),
    _initial_gvn(NULL),
    _for_igvn(NULL),
    _warm_calls(NULL),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _subsume_loads(true),
    _do_escape_analysis(false),
    _failure_reason(NULL),
    _code_buffer("Compile::Fill_buffer"),
    _node_bundling_limit(0),
    _node_bundling_base(NULL),
#ifndef PRODUCT
    _trace_opto_output(TraceOptoOutput),
    _printer(NULL),
#endif
    _congraph(NULL) {
  C = this;

#ifndef PRODUCT
  TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
  TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
  set_print_assembly(PrintFrameConverterAssembly);
  set_parsed_irreducible_loop(false);
#endif
  CompileWrapper cw(this);
  Init(/*AliasLevel=*/ 0);
  init_tf((*generator)());

  {
    // The following is a dummy for the sake of GraphKit::gen_stub
    Unique_Node_List for_igvn(comp_arena());
    set_for_igvn(&for_igvn);  // not used, but some GraphKit guys push on this
    PhaseGVN gvn(Thread::current()->resource_area(),255);
    set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
    gvn.transform_no_reclaim(top());

    GraphKit kit;
    kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
  }

  NOT_PRODUCT( verify_graph_edges(); )
  Code_Gen();
  if (failing())  return;


  // Entry point will be accessed using compile->stub_entry_point();
  if (code_buffer() == NULL) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!failing()) {
      assert(_fixed_slots == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
                                                      code_buffer(),
                                                      CodeOffsets::frame_never_safe,
                                                      // _code_offsets.value(CodeOffsets::Frame_Complete),
                                                      frame_size_in_words(),
                                                      _oop_map_set,
                                                      save_arg_registers);
      assert(rs != NULL && rs->is_runtime_stub(), "sanity check");

      _stub_entry_point = rs->entry_point();
    }
  }
}

#ifndef PRODUCT
void print_opto_verbose_signature( const TypeFunc *j_sig, const char *stub_name ) {
  if(PrintOpto && Verbose) {
    tty->print("%s ", stub_name); j_sig->print_flattened(); tty->cr();
  }
}
#endif

void Compile::print_codes() {
}

//------------------------------Init-------------------------------------------
// Prepare for a single compilation
void Compile::Init(int aliaslevel) {
  _unique  = 0;
  _regalloc = NULL;

  _tf      = NULL;  // filled in later
  _top     = NULL;  // cached later
  _matcher = NULL;  // filled in later
  _cfg     = NULL;  // filled in later

  set_24_bit_selection_and_mode(Use24BitFP, false);

  _node_note_array = NULL;
  _default_node_notes = NULL;

  _immutable_memory = NULL; // filled in at first inquiry

  // Globally visible Nodes
  // First set TOP to NULL to give safe behavior during creation of RootNode
  set_cached_top_node(NULL);
  set_root(new (this, 3) RootNode());
  // Now that you have a Root to point to, create the real TOP
  set_cached_top_node( new (this, 1) ConNode(Type::TOP) );
  set_recent_alloc(NULL, NULL);

  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  env()->set_oop_recorder(new OopRecorder(comp_arena()));
  env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
  env()->set_dependencies(new Dependencies(env()));

  _fixed_slots = 0;
  set_has_split_ifs(false);
  set_has_loops(has_method() && method()->has_loops()); // first approximation
  _deopt_happens = true;  // start out assuming the worst
  _trap_can_recompile = false;  // no traps emitted yet
  _major_progress = true; // start out assuming good things will happen
  set_has_unsafe_access(false);
  Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
  set_decompile_count(0);

  // Compilation level related initialization
  if (env()->comp_level() == CompLevel_fast_compile) {
    set_num_loop_opts(Tier1LoopOptsCount);
    set_do_inlining(Tier1Inline != 0);
    set_max_inline_size(Tier1MaxInlineSize);
    set_freq_inline_size(Tier1FreqInlineSize);
    set_do_scheduling(false);
    set_do_count_invocations(Tier1CountInvocations);
    set_do_method_data_update(Tier1UpdateMethodData);
  } else {
    assert(env()->comp_level() == CompLevel_full_optimization, "unknown comp level");
    set_num_loop_opts(LoopOptsCount);
    set_do_inlining(Inline);
    set_max_inline_size(MaxInlineSize);
    set_freq_inline_size(FreqInlineSize);
    set_do_scheduling(OptoScheduling);
    set_do_count_invocations(false);
    set_do_method_data_update(false);
  }

  if (debug_info()->recording_non_safepoints()) {
    set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
                        (comp_arena(), 8, 0, NULL));
    set_default_node_notes(Node_Notes::make(this));
  }

  // // -- Initialize types before each compile --
  // // Update cached type information
  // if( _method && _method->constants() )
  //   Type::update_loaded_types(_method, _method->constants());

  // Init alias_type map.
  if (!_do_escape_analysis && aliaslevel == 3)
    aliaslevel = 2;  // No unique types without escape analysis
  _AliasLevel = aliaslevel;
  const int grow_ats = 16;
  _max_alias_types = grow_ats;
  _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
  {
    for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
  }
  // Initialize the first few types.
  _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
  _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
  _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
  _num_alias_types = AliasIdxRaw+1;
  // Zero out the alias type cache.
  Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
  // A NULL adr_type hits in the cache right away.  Preload the right answer.
  probe_alias_cache(NULL)->_index = AliasIdxTop;

  _intrinsics = NULL;
  _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
  register_library_intrinsics();
}
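
// Alias-level summary (illustrative, paraphrasing the -XX:AliasLevel flag):
// 0 treats all memory as one slice, 1 splits only oop/static/field/array,
// 2 additionally splits by class and field, and 3 adds unique per-allocation
// instance slices -- which is why Init() downgrades 3 to 2 above when escape
// analysis is off ("no unique types without escape analysis").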

//---------------------------init_start----------------------------------------
// Install the StartNode on this compile object.
void Compile::init_start(StartNode* s) {
  if (failing())
    return; // already failing
  assert(s == start(), "");
}

StartNode* Compile::start() const {
  assert(!failing(), "");
  for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
    Node* start = root()->fast_out(i);
    if( start->is_Start() )
      return start->as_Start();
  }
  ShouldNotReachHere();
  return NULL;
}

//-------------------------------immutable_memory-------------------------------------
// Access immutable memory
Node* Compile::immutable_memory() {
  if (_immutable_memory != NULL) {
    return _immutable_memory;
  }
  StartNode* s = start();
  for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
    Node *p = s->fast_out(i);
    if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
      _immutable_memory = p;
      return _immutable_memory;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

//----------------------set_cached_top_node------------------------------------
// Install the cached top node, and make sure Node::is_top works correctly.
void Compile::set_cached_top_node(Node* tn) {
  if (tn != NULL)  verify_top(tn);
  Node* old_top = _top;
  _top = tn;
  // Calling Node::setup_is_top allows the nodes the chance to adjust
  // their _out arrays.
  if (_top != NULL)     _top->setup_is_top();
  if (old_top != NULL)  old_top->setup_is_top();
  assert(_top == NULL || top()->is_top(), "");
}

#ifndef PRODUCT
void Compile::verify_top(Node* tn) const {
  if (tn != NULL) {
    assert(tn->is_Con(), "top node must be a constant");
    assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
    assert(tn->in(0) != NULL, "must have live top node");
  }
}
#endif


///-------------------Managing Per-Node Debug & Profile Info-------------------

void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
  guarantee(arr != NULL, "");
  int num_blocks = arr->length();
  if (grow_by < num_blocks)  grow_by = num_blocks;
  int num_notes = grow_by * _node_notes_block_size;
  Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
  Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
  while (num_notes > 0) {
    arr->append(notes);
    notes     += _node_notes_block_size;
    num_notes -= _node_notes_block_size;
  }
  assert(num_notes == 0, "exact multiple, please");
}
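
// Growth note (illustrative): clamping grow_by up to the current block count
// makes the block array at least double on each growth, so repeated appends
// stay amortized O(1); the note blocks are arena-allocated and are never
// freed individually.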

bool Compile::copy_node_notes_to(Node* dest, Node* source) {
  if (source == NULL || dest == NULL)  return false;

  if (dest->is_Con())
    return false;               // Do not push debug info onto constants.

#ifdef ASSERT
  // Leave a bread crumb trail pointing to the original node:
  if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
    dest->set_debug_orig(source);
  }
#endif

  if (node_note_array() == NULL)
    return false;               // Not collecting any notes now.

  // This is a copy onto a pre-existing node, which may already have notes.
  // If both nodes have notes, do not overwrite any pre-existing notes.
  Node_Notes* source_notes = node_notes_at(source->_idx);
  if (source_notes == NULL || source_notes->is_clear())  return false;
  Node_Notes* dest_notes = node_notes_at(dest->_idx);
  if (dest_notes == NULL || dest_notes->is_clear()) {
    return set_node_notes_at(dest->_idx, source_notes);
  }

  Node_Notes merged_notes = (*source_notes);
  // The order of operations here ensures that dest notes will win...
  merged_notes.update_from(dest_notes);
  return set_node_notes_at(dest->_idx, &merged_notes);
}


//--------------------------allow_range_check_smearing-------------------------
// Gating condition for coalescing similar range checks.
// Sometimes we try 'speculatively' replacing a series of range checks by a
// single covering check that is at least as strong as any of them.
// If the optimization succeeds, the simplified (strengthened) range check
// will always succeed.  If it fails, we will deopt, and then give up
// on the optimization.
bool Compile::allow_range_check_smearing() const {
  // If this method has already thrown a range-check,
  // assume it was because we already tried range smearing
  // and it failed.
  uint already_trapped = trap_count(Deoptimization::Reason_range_check);
  return !already_trapped;
}
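
// Example (illustrative): for accesses a[i-1], a[i], a[i+1] the optimizer may
// emit one covering check instead of three. If that strengthened check ever
// deopts, the Reason_range_check trap count consulted above becomes non-zero
// and smearing is disabled in the recompiled method.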
|
1007 |
||
1008 |
||
1009 |
//------------------------------flatten_alias_type----------------------------- |
|
1010 |
const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { |
|
1011 |
int offset = tj->offset(); |
|
1012 |
TypePtr::PTR ptr = tj->ptr(); |
|
1013 |
||
955 | 1014 |
// Known instance (scalarizable allocation) alias only with itself. |
1015 |
bool is_known_inst = tj->isa_oopptr() != NULL && |
|
1016 |
tj->is_oopptr()->is_known_instance(); |
|
1017 |
||
1 | 1018 |
// Process weird unsafe references. |
1019 |
if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) { |
|
1020 |
assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops"); |
|
955 | 1021 |
assert(!is_known_inst, "scalarizable allocation should not have unsafe references"); |
1 | 1022 |
tj = TypeOopPtr::BOTTOM; |
1023 |
ptr = tj->ptr(); |
|
1024 |
offset = tj->offset(); |
|
1025 |
} |
|
1026 |
||
1027 |
// Array pointers need some flattening |
|
1028 |
const TypeAryPtr *ta = tj->isa_aryptr(); |
|
955 | 1029 |
if( ta && is_known_inst ) { |
1030 |
if ( offset != Type::OffsetBot && |
|
1031 |
offset > arrayOopDesc::length_offset_in_bytes() ) { |
|
1032 |
offset = Type::OffsetBot; // Flatten constant access into array body only |
|
1033 |
tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id()); |
|
1034 |
} |
|
1035 |
} else if( ta && _AliasLevel >= 2 ) { |
|
1 | 1036 |
// For arrays indexed by constant indices, we flatten the alias |
1037 |
// space to include all of the array body. Only the header, klass |
|
1038 |
// and array length can be accessed un-aliased. |
|
1039 |
if( offset != Type::OffsetBot ) { |
|
1040 |
if( ta->const_oop() ) { // methodDataOop or methodOop |
|
1041 |
offset = Type::OffsetBot; // Flatten constant access into array body |
|
955 | 1042 |
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset); |
1 | 1043 |
} else if( offset == arrayOopDesc::length_offset_in_bytes() ) { |
1044 |
// range is OK as-is. |
|
1045 |
tj = ta = TypeAryPtr::RANGE; |
|
1046 |
} else if( offset == oopDesc::klass_offset_in_bytes() ) { |
|
1047 |
tj = TypeInstPtr::KLASS; // all klass loads look alike |
|
1048 |
ta = TypeAryPtr::RANGE; // generic ignored junk |
|
1049 |
ptr = TypePtr::BotPTR; |
|
1050 |
} else if( offset == oopDesc::mark_offset_in_bytes() ) { |
|
1051 |
tj = TypeInstPtr::MARK; |
|
1052 |
ta = TypeAryPtr::RANGE; // generic ignored junk |
|
1053 |
ptr = TypePtr::BotPTR; |
|
1054 |
} else { // Random constant offset into array body |
|
1055 |
offset = Type::OffsetBot; // Flatten constant access into array body |
|
955 | 1056 |
tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset); |
1 | 1057 |
} |
1058 |
} |
|
1059 |
// Arrays of fixed size alias with arrays of unknown size. |
|
1060 |
if (ta->size() != TypeInt::POS) { |
|
1061 |
const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS); |
|
955 | 1062 |
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset); |
1 | 1063 |
} |
1064 |
// Arrays of known objects become arrays of unknown objects. |
|
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
347
diff
changeset
|
1065 |
if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) { |
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
347
diff
changeset
|
1066 |
const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size()); |
955 | 1067 |
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); |
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
347
diff
changeset
|
1068 |
} |
1 | 1069 |
if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) { |
1070 |
const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size()); |
|
955 | 1071 |
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); |
1 | 1072 |
} |
1073 |
// Arrays of bytes and of booleans both use 'bastore' and 'baload' so |
|
1074 |
// cannot be distinguished by bytecode alone. |
|
1075 |
if (ta->elem() == TypeInt::BOOL) { |
|
1076 |
const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size()); |
|
1077 |
ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE); |
|
955 | 1078 |
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset); |
1 | 1079 |
} |
1080 |
// During the 2nd round of IterGVN, NotNull castings are removed. |
|
1081 |
// Make sure the Bottom and NotNull variants alias the same. |
|
1082 |
// Also, make sure exact and non-exact variants alias the same. |
|
1083 |
if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) { |
|
1084 |
if (ta->const_oop()) { |
|
1085 |
tj = ta = TypeAryPtr::make(TypePtr::Constant,ta->const_oop(),ta->ary(),ta->klass(),false,offset); |
|
1086 |
} else { |
|
1087 |
tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset); |
|
1088 |
} |
|
1089 |
} |
|
1090 |
} |
|
1091 |
||
1092 |
// Oop pointers need some flattening |
|
1093 |
const TypeInstPtr *to = tj->isa_instptr(); |
|
1094 |
if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) { |
|
1095 |
if( ptr == TypePtr::Constant ) { |
|
1096 |
// No constant oop pointers (such as Strings); they alias with |
|
1097 |
// unknown strings. |
|
955 | 1098 |
assert(!is_known_inst, "not scalarizable allocation"); |
1 | 1099 |
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); |
955 | 1100 |
} else if( is_known_inst ) { |
589 | 1101 |
tj = to; // Keep NotNull and klass_is_exact for instance type |
1 | 1102 |
} else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) { |
1103 |
// During the 2nd round of IterGVN, NotNull castings are removed. |
|
1104 |
// Make sure the Bottom and NotNull variants alias the same. |
|
1105 |
// Also, make sure exact and non-exact variants alias the same. |
|
955 | 1106 |
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); |
1 | 1107 |
} |
1108 |
// Canonicalize the holder of this field |
|
1109 |
ciInstanceKlass *k = to->klass()->as_instance_klass(); |
|
360
21d113ecbf6a
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
347
diff
changeset
|
1110 |
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) { |
1 | 1111 |
// First handle header references such as a LoadKlassNode, even if the |
1112 |
// object's klass is unloaded at compile time (4965979). |
|
955 | 1113 |
if (!is_known_inst) { // Do it only for non-instance types |
1114 |
tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset); |
|
1115 |
} |
|
1 | 1116 |
} else if (offset < 0 || offset >= k->size_helper() * wordSize) { |
1117 |
to = NULL; |
|
1118 |
tj = TypeOopPtr::BOTTOM; |
|
1119 |
offset = tj->offset(); |
|
1120 |
} else { |
|
1121 |
ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset); |
|
1122 |
if (!k->equals(canonical_holder) || tj->offset() != offset) { |
|
955 | 1123 |
if( is_known_inst ) { |
1124 |
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id()); |
|
1125 |
} else { |
|
1126 |
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset); |
|
1127 |
} |
|
1 | 1128 |
} |
1129 |
} |
|
1130 |
} |
|
1131 |
||
1132 |
// Klass pointers to object array klasses need some flattening |
|
1133 |
const TypeKlassPtr *tk = tj->isa_klassptr(); |
|
1134 |
if( tk ) { |
|
1135 |
// If we are referencing a field within a Klass, we need |
|
1136 |
// to assume the worst case of an Object. Both exact and |
|
1137 |
// inexact types must flatten to the same alias class. |
|
1138 |
// Since the flattened result for a klass is defined to be |
|
1139 |
// precisely java.lang.Object, use a constant ptr. |
|
1140 |
if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) { |
|
1141 |
||
1142 |
tj = tk = TypeKlassPtr::make(TypePtr::Constant, |
|
1143 |
TypeKlassPtr::OBJECT->klass(), |
|
1144 |
offset); |
|
1145 |
} |
|
1146 |
||
1147 |
ciKlass* klass = tk->klass(); |
|
1148 |
if( klass->is_obj_array_klass() ) { |
|
1149 |
ciKlass* k = TypeAryPtr::OOPS->klass(); |
|
1150 |
if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs |
|
1151 |
k = TypeInstPtr::BOTTOM->klass(); |
|
1152 |
tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset ); |
|
1153 |
} |
|
1154 |
||
1155 |
// Check for precise loads from the primary supertype array and force them |
|
1156 |
// to the supertype cache alias index. Check for generic array loads from |
|
1157 |
// the primary supertype array and also force them to the supertype cache |
|
1158 |
// alias index. Since the same load can reach both, we need to merge |
|
1159 |
// these 2 disparate memories into the same alias class. Since the |
|
1160 |
// primary supertype array is read-only, there's no chance of confusion |
|
1161 |
// where we bypass an array load and an array store. |
|
1162 |
uint off2 = offset - Klass::primary_supers_offset_in_bytes(); |
|
1163 |
if( offset == Type::OffsetBot || |
|
1164 |
off2 < Klass::primary_super_limit()*wordSize ) { |
|
1165 |
offset = sizeof(oopDesc) +Klass::secondary_super_cache_offset_in_bytes(); |
|
1166 |
tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset ); |
|
1167 |
} |
|
1168 |
} |
|
1169 |
||
  // Flatten all Raw pointers together.
  if (tj->base() == Type::RawPtr)
    tj = TypeRawPtr::BOTTOM;

  if (tj->base() == Type::AnyPtr)
    tj = TypePtr::BOTTOM;      // An error, which the caller must check for.

  // Flatten all to bottom for now
  switch( _AliasLevel ) {
  case 0:
    tj = TypePtr::BOTTOM;
    break;
  case 1:                      // Flatten to: oop, static, field or array
    switch (tj->base()) {
    //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
    case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
    case Type::AryPtr:   // do not distinguish arrays at all
    case Type::InstPtr:  tj = TypeInstPtr::BOTTOM;  break;
    case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break;
    case Type::AnyPtr:   tj = TypePtr::BOTTOM;      break;  // caller checks it
    default: ShouldNotReachHere();
    }
    break;
  case 2:                      // No collapsing at level 2; keep all splits
  case 3:                      // No collapsing at level 3; keep all splits
    break;
  default:
    Unimplemented();
  }

  offset = tj->offset();
  assert( offset != Type::OffsetTop, "Offset has fallen from constant" );

  assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
          (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
          (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
          (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
          (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
          (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
          (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr),
          "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
  assert( tj->ptr() != TypePtr::TopPTR &&
          tj->ptr() != TypePtr::AnyNull &&
          tj->ptr() != TypePtr::Null, "No imprecise addresses" );
  // assert( tj->ptr() != TypePtr::Constant ||
  //         tj->base() == Type::RawPtr ||
  //         tj->base() == Type::KlassPtr, "No constant oop addresses" );

  return tj;
}

void Compile::AliasType::Init(int i, const TypePtr* at) {
  _index = i;
  _adr_type = at;
  _field = NULL;
  _is_rewritable = true; // default
  const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
  if (atoop != NULL && atoop->is_known_instance()) {
    const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
    _general_index = Compile::current()->get_alias_index(gt);
  } else {
    _general_index = 0;
  }
}

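// Note on _general_index above: a "known instance" alias type is a
// per-allocation memory slice; _general_index remembers the alias index of
// the same address type with the instance id erased, so the per-instance
// slice can be folded back into its general slice when needed.
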
//---------------------------------print_on------------------------------------
#ifndef PRODUCT
void Compile::AliasType::print_on(outputStream* st) {
  if (index() < 10)
    st->print("@ <%d> ", index());
  else
    st->print("@ <%d>", index());
  st->print(is_rewritable() ? "   " : " RO");
  int offset = adr_type()->offset();
  if (offset == Type::OffsetBot)
    st->print(" +any");
  else
    st->print(" +%-3d", offset);
  st->print(" in ");
  adr_type()->dump_on(st);
  const TypeOopPtr* tjp = adr_type()->isa_oopptr();
  if (field() != NULL && tjp) {
    if (tjp->klass()  != field()->holder() ||
        tjp->offset() != field()->offset_in_bytes()) {
      st->print(" != ");
      field()->print();
      st->print(" ***");
    }
  }
}

void print_alias_types() {
  Compile* C = Compile::current();
  tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
  for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
    C->alias_type(idx)->print_on(tty);
    tty->cr();
  }
}
#endif


//----------------------------probe_alias_cache--------------------------------
Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
  intptr_t key = (intptr_t) adr_type;
  key ^= key >> logAliasCacheSize;
  return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
}

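// The alias cache probed above is a direct-mapped table of
// (1 << logAliasCacheSize) entries keyed by the TypePtr's own address; the
// xor-fold mixes the high pointer bits into the index, presumably so that
// arena-allocated types sharing low-order bit patterns do not all collide.
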
//-----------------------------grow_alias_types--------------------------------
void Compile::grow_alias_types() {
  const int old_ats  = _max_alias_types; // how many before?
  const int new_ats  = old_ats;          // how many more?
  const int grow_ats = old_ats+new_ats;  // how many now?
  _max_alias_types = grow_ats;
  _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
  for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
}


//--------------------------------find_alias_type------------------------------
Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create) {
  if (_AliasLevel == 0)
    return alias_type(AliasIdxBot);

  AliasCacheEntry* ace = probe_alias_cache(adr_type);
  if (ace->_adr_type == adr_type) {
    return alias_type(ace->_index);
  }

  // Handle special cases.
  if (adr_type == NULL)             return alias_type(AliasIdxTop);
  if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);

  // Do it the slow way.
  const TypePtr* flat = flatten_alias_type(adr_type);

#ifdef ASSERT
  assert(flat == flatten_alias_type(flat), "idempotent");
  assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr");
  if (flat->isa_oopptr() && !flat->isa_klassptr()) {
    const TypeOopPtr* foop = flat->is_oopptr();
    // Scalarizable allocations have exact klass always.
    bool exact = !foop->klass_is_exact() || foop->is_known_instance();
    const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
    assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type");
  }
  assert(flat == flatten_alias_type(flat), "exact bit doesn't matter");
#endif

  int idx = AliasIdxTop;
  for (int i = 0; i < num_alias_types(); i++) {
    if (alias_type(i)->adr_type() == flat) {
      idx = i;
      break;
    }
  }

  if (idx == AliasIdxTop) {
    if (no_create)  return NULL;
    // Grow the array if necessary.
    if (_num_alias_types == _max_alias_types)  grow_alias_types();
    // Add a new alias type.
    idx = _num_alias_types++;
    _alias_types[idx]->Init(idx, flat);
    if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
    if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
    if (flat->isa_instptr()) {
      if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
          && flat->is_instptr()->klass() == env()->Class_klass())
        alias_type(idx)->set_rewritable(false);
    }
    if (flat->isa_klassptr()) {
      if (flat->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc))
        alias_type(idx)->set_rewritable(false);
    }
    // %%% (We would like to finalize JavaThread::threadObj_offset(),
    // but the base pointer type is not distinctive enough to identify
    // references into JavaThread.)

    // Check for final instance fields.
    const TypeInstPtr* tinst = flat->isa_instptr();
    if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
      ciInstanceKlass *k = tinst->klass()->as_instance_klass();
      ciField* field = k->get_field_by_offset(tinst->offset(), false);
      // Set field() and is_rewritable() attributes.
      if (field != NULL)  alias_type(idx)->set_field(field);
    }
    const TypeKlassPtr* tklass = flat->isa_klassptr();
    // Check for final static fields.
    if (tklass && tklass->klass()->is_instance_klass()) {
      ciInstanceKlass *k = tklass->klass()->as_instance_klass();
      ciField* field = k->get_field_by_offset(tklass->offset(), true);
      // Set field() and is_rewritable() attributes.
      if (field != NULL)  alias_type(idx)->set_field(field);
    }
  }

  // Fill the cache for next time.
  ace->_adr_type = adr_type;
  ace->_index    = idx;
  assert(alias_type(adr_type) == alias_type(idx), "type must be installed");

  // Might as well try to fill the cache for the flattened version, too.
  AliasCacheEntry* face = probe_alias_cache(flat);
  if (face->_adr_type == NULL) {
    face->_adr_type = flat;
    face->_index    = idx;
    assert(alias_type(flat) == alias_type(idx), "flat type must work too");
  }

  return alias_type(idx);
}

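// Note: the search over _alias_types above is linear, but it only runs on an
// alias-cache miss; repeated queries for the same address type are answered
// directly by probe_alias_cache().
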

Compile::AliasType* Compile::alias_type(ciField* field) {
  const TypeOopPtr* t;
  if (field->is_static())
    t = TypeKlassPtr::make(field->holder());
  else
    t = TypeOopPtr::make_from_klass_raw(field->holder());
  AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()));
  assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
  return atp;
}


//------------------------------have_alias_type--------------------------------
bool Compile::have_alias_type(const TypePtr* adr_type) {
  AliasCacheEntry* ace = probe_alias_cache(adr_type);
  if (ace->_adr_type == adr_type) {
    return true;
  }

  // Handle special cases.
  if (adr_type == NULL)             return true;
  if (adr_type == TypePtr::BOTTOM)  return true;

  return find_alias_type(adr_type, true) != NULL;
}

//-----------------------------must_alias--------------------------------------
// True if all values of the given address type are in the given alias category.
bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
  if (alias_idx == AliasIdxBot)          return true;  // the universal category
  if (adr_type == NULL)                  return true;  // NULL serves as TypePtr::TOP
  if (alias_idx == AliasIdxTop)          return false; // the empty category
  if (adr_type->base() == Type::AnyPtr)  return false; // TypePtr::BOTTOM or its twins

  // the only remaining possible overlap is identity
  int adr_idx = get_alias_index(adr_type);
  assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
  assert(adr_idx == alias_idx ||
         (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
          && adr_type                       != TypeOopPtr::BOTTOM),
         "should not be testing for overlap with an unsafe pointer");
  return adr_idx == alias_idx;
}

//------------------------------can_alias--------------------------------------
// True if any values of the given address type are in the given alias category.
bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
  if (alias_idx == AliasIdxTop)          return false; // the empty category
  if (adr_type == NULL)                  return false; // NULL serves as TypePtr::TOP
  if (alias_idx == AliasIdxBot)          return true;  // the universal category
  if (adr_type->base() == Type::AnyPtr)  return true;  // TypePtr::BOTTOM or its twins

  // the only remaining possible overlap is identity
  int adr_idx = get_alias_index(adr_type);
  assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
  return adr_idx == alias_idx;
}


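// must_alias() and can_alias() agree on the identity case but diverge at the
// extremes: the universal category (AliasIdxBot) satisfies both, the empty
// category (AliasIdxTop) satisfies neither, and an untyped AnyPtr address can
// alias anything without being guaranteed to alias anything in particular.
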
//---------------------------pop_warm_call-------------------------------------
WarmCallInfo* Compile::pop_warm_call() {
  WarmCallInfo* wci = _warm_calls;
  if (wci != NULL)  _warm_calls = wci->remove_from(wci);
  return wci;
}

//----------------------------Inline_Warm--------------------------------------
int Compile::Inline_Warm() {
  // If there is room, try to inline some more warm call sites.
  // %%% Do a graph index compaction pass when we think we're out of space?
  if (!InlineWarmCalls)  return 0;

  int calls_made_hot = 0;
  int room_to_grow   = NodeCountInliningCutoff - unique();
  int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
  int amount_grown   = 0;
  WarmCallInfo* call;
  while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
    int est_size = (int)call->size();
    if (est_size > (room_to_grow - amount_grown)) {
      // This one won't fit anyway.  Get rid of it.
      call->make_cold();
      continue;
    }
    call->make_hot();
    calls_made_hot++;
    amount_grown   += est_size;
    amount_to_grow -= est_size;
  }

  if (calls_made_hot > 0)  set_major_progress();
  return calls_made_hot;
}

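// Note the two budgets above: room_to_grow caps total growth against
// NodeCountInliningCutoff, while amount_to_grow limits how much of that room
// one pass may spend.  A warm call too big for the remaining room is made
// cold outright rather than re-queued.
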

//----------------------------Finish_Warm--------------------------------------
void Compile::Finish_Warm() {
  if (!InlineWarmCalls)      return;
  if (failing())             return;
  if (warm_calls() == NULL)  return;

  // Clean up loose ends, if we are out of space for inlining.
  WarmCallInfo* call;
  while ((call = pop_warm_call()) != NULL) {
    call->make_cold();
  }
}


//------------------------------Optimize---------------------------------------
// Given a graph, optimize it.
void Compile::Optimize() {
  TracePhase t1("optimizer", &_t_optimizer, true);

#ifndef PRODUCT
  if (env()->break_at_compile()) {
    BREAKPOINT;
  }

#endif

  ResourceMark rm;
  int loop_opts_cnt;

  NOT_PRODUCT( verify_graph_edges(); )

  print_method("After Parsing");

  {
  // Iterative Global Value Numbering, including ideal transforms
  // Initialize IterGVN with types and values from parse-time GVN
  PhaseIterGVN igvn(initial_gvn());
  {
    NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
    igvn.optimize();
  }

  print_method("Iter GVN 1", 2);

  if (failing())  return;

  // get rid of the connection graph since its information is not
  // updated by optimizations
  _congraph = NULL;


  // Loop transforms on the ideal graph.  Range Check Elimination,
  // peeling, unrolling, etc.

  // Set loop opts counter
  loop_opts_cnt = num_loop_opts();
  if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
    {
      TracePhase t2("idealLoop", &_t_idealLoop, true);
      PhaseIdealLoop ideal_loop( igvn, NULL, true );
      loop_opts_cnt--;
      if (major_progress()) print_method("PhaseIdealLoop 1", 2);
      if (failing())  return;
    }
    // Loop opts pass if partial peeling occurred in previous pass
    if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
      TracePhase t3("idealLoop", &_t_idealLoop, true);
      PhaseIdealLoop ideal_loop( igvn, NULL, false );
      loop_opts_cnt--;
      if (major_progress()) print_method("PhaseIdealLoop 2", 2);
      if (failing())  return;
    }
    // Loop opts pass for loop-unrolling before CCP
    if(major_progress() && (loop_opts_cnt > 0)) {
      TracePhase t4("idealLoop", &_t_idealLoop, true);
      PhaseIdealLoop ideal_loop( igvn, NULL, false );
      loop_opts_cnt--;
      if (major_progress()) print_method("PhaseIdealLoop 3", 2);
    }
  }
  if (failing())  return;

  // Conditional Constant Propagation;
  PhaseCCP ccp( &igvn );
  assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
  {
    TracePhase t2("ccp", &_t_ccp, true);
    ccp.do_transform();
  }
  print_method("PhaseCCP 1", 2);

  assert( true, "Break here to ccp.dump_old2new_map()");

  // Iterative Global Value Numbering, including ideal transforms
  {
    NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
    igvn = ccp;
    igvn.optimize();
  }

  print_method("Iter GVN 2", 2);

  if (failing())  return;

  // Loop transforms on the ideal graph.  Range Check Elimination,
  // peeling, unrolling, etc.
  if(loop_opts_cnt > 0) {
    debug_only( int cnt = 0; );
    while(major_progress() && (loop_opts_cnt > 0)) {
      TracePhase t2("idealLoop", &_t_idealLoop, true);
      assert( cnt++ < 40, "infinite cycle in loop optimization" );
      PhaseIdealLoop ideal_loop( igvn, NULL, true );
      loop_opts_cnt--;
      if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
      if (failing())  return;
    }
  }
  {
    NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
    PhaseMacroExpand mex(igvn);
    if (mex.expand_macro_nodes()) {
      assert(failing(), "must bail out w/ explicit message");
      return;
    }
  }

  } // (End scope of igvn; run destructor if necessary for asserts.)

  // A method with only infinite loops has no edges entering loops from root
  {
    NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
    if (final_graph_reshaping()) {
      assert(failing(), "must bail out w/ explicit message");
      return;
    }
  }

  print_method("Optimize finished", 2);
}


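// Phase order in Optimize() above: parse-time GVN feeds IterGVN, then loop
// opts, CCP, a second IterGVN, more loop opts, macro expansion, and finally
// final_graph_reshaping().  Each print_method() call is a debug snapshot
// point for the ideal graph printer.
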
//------------------------------Code_Gen---------------------------------------
// Given a graph, generate code for it
void Compile::Code_Gen() {
  if (failing())  return;

  // Perform instruction selection.  You might think we could reclaim Matcher
  // memory PDQ, but actually the Matcher is used in generating spill code.
  // Internals of the Matcher (including some VectorSets) must remain live
  // for awhile - thus I cannot reclaim Matcher memory lest a VectorSet usage
  // set a bit in reclaimed memory.

  // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
  // nodes.  Mapping is only valid at the root of each matched subtree.
  NOT_PRODUCT( verify_graph_edges(); )

  Node_List proj_list;
  Matcher m(proj_list);
  _matcher = &m;
  {
    TracePhase t2("matcher", &_t_matcher, true);
    m.match();
  }
  // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
  // nodes.  Mapping is only valid at the root of each matched subtree.
  NOT_PRODUCT( verify_graph_edges(); )

  // If you have too many nodes, or if matching has failed, bail out
  check_node_count(0, "out of nodes matching instructions");
  if (failing())  return;

  // Build a proper-looking CFG
  PhaseCFG cfg(node_arena(), root(), m);
  _cfg = &cfg;
  {
    NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
    cfg.Dominators();
    if (failing())  return;

    NOT_PRODUCT( verify_graph_edges(); )

    cfg.Estimate_Block_Frequency();
    cfg.GlobalCodeMotion(m,unique(),proj_list);

    print_method("Global code motion", 2);

    if (failing())  return;
    NOT_PRODUCT( verify_graph_edges(); )

    debug_only( cfg.verify(); )
  }
  NOT_PRODUCT( verify_graph_edges(); )

  PhaseChaitin regalloc(unique(),cfg,m);
  _regalloc = &regalloc;
  {
    TracePhase t2("regalloc", &_t_registerAllocation, true);
    // Perform any platform dependent preallocation actions.  This is used,
    // for example, to avoid taking an implicit null pointer exception
    // using the frame pointer on win95.
    _regalloc->pd_preallocate_hook();

    // Perform register allocation.  After Chaitin, use-def chains are
    // no longer accurate (at spill code) and so must be ignored.
    // Node->LRG->reg mappings are still accurate.
    _regalloc->Register_Allocate();

    // Bail out if the allocator builds too many nodes
    if (failing())  return;
  }

  // Prior to register allocation we kept empty basic blocks in case the
  // allocator needed a place to spill.  After register allocation we
  // are not adding any new instructions.  If any basic block is empty, we
  // can now safely remove it.
  {
    NOT_PRODUCT( TracePhase t2("removeEmpty", &_t_removeEmptyBlocks, TimeCompiler); )
    cfg.RemoveEmpty();
  }

  // Perform any platform dependent postallocation verifications.
  debug_only( _regalloc->pd_postallocate_verify_hook(); )

  // Apply peephole optimizations
  if( OptoPeephole ) {
    NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
    PhasePeephole peep( _regalloc, cfg);
    peep.do_transform();
  }

  // Convert Nodes to instruction bits in a buffer
  {
    // %%%% workspace merge brought two timers together for one job
    TracePhase t2a("output", &_t_output, true);
    NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); )
    Output();
  }

  print_method("Final Code");

  // He's dead, Jim.
  _cfg      = (PhaseCFG*)0xdeadbeef;
  _regalloc = (PhaseChaitin*)0xdeadbeef;
}


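// The 0xdeadbeef stores above deliberately poison _cfg and _regalloc: both
// phases were stack-allocated in this frame, so any later dereference of
// these pointers should fault fast instead of silently reading dead memory.
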
//------------------------------dump_asm---------------------------------------
// Dump formatted assembly
#ifndef PRODUCT
void Compile::dump_asm(int *pcs, uint pc_limit) {
  bool cut_short = false;
  tty->print_cr("#");
  tty->print("#  ");  _tf->dump();  tty->cr();
  tty->print_cr("#");

  // For all blocks
  int pc = 0x0;                 // Program counter
  char starts_bundle = ' ';
  _regalloc->dump_frame();

  Node *n = NULL;
  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
    if (VMThread::should_terminate()) { cut_short = true; break; }
    Block *b = _cfg->_blocks[i];
    if (b->is_connector() && !Verbose) continue;
    n = b->_nodes[0];
    if (pcs && n->_idx < pc_limit)
      tty->print("%3.3x   ", pcs[n->_idx]);
    else
      tty->print("      ");
    b->dump_head( &_cfg->_bbs );
    if (b->is_connector()) {
      tty->print_cr("        # Empty connector block");
    } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
      tty->print_cr("        # Block is sole successor of call");
    }

    // For all instructions
    Node *delay = NULL;
    for( uint j = 0; j<b->_nodes.size(); j++ ) {
      if (VMThread::should_terminate()) { cut_short = true; break; }
      n = b->_nodes[j];
      if (valid_bundle_info(n)) {
        Bundle *bundle = node_bundling(n);
        if (bundle->used_in_unconditional_delay()) {
          delay = n;
          continue;
        }
        if (bundle->starts_bundle())
          starts_bundle = '+';
      }

      if (WizardMode) n->dump();

      if( !n->is_Region() &&    // Don't print in the Assembly
          !n->is_Phi() &&       // a few noisily useless nodes
          !n->is_Proj() &&
          !n->is_MachTemp() &&
          !n->is_Catch() &&     // Would be nice to print exception table targets
          !n->is_MergeMem() &&  // Not very interesting
          !n->is_top() &&       // Debug info table constants
          !(n->is_Con() && !n->is_Mach())// Debug info table constants
          ) {
        if (pcs && n->_idx < pc_limit)
          tty->print("%3.3x", pcs[n->_idx]);
        else
          tty->print("   ");
        tty->print(" %c ", starts_bundle);
        starts_bundle = ' ';
        tty->print("\t");
        n->format(_regalloc, tty);
        tty->cr();
      }

      // If we have an instruction with a delay slot, and have seen a delay,
      // then back up and print it
      if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
        assert(delay != NULL, "no unconditional delay instruction");
        if (WizardMode) delay->dump();

        if (node_bundling(delay)->starts_bundle())
          starts_bundle = '+';
        if (pcs && n->_idx < pc_limit)
          tty->print("%3.3x", pcs[n->_idx]);
        else
          tty->print("   ");
        tty->print(" %c ", starts_bundle);
        starts_bundle = ' ';
        tty->print("\t");
        delay->format(_regalloc, tty);
        tty->print_cr("");
        delay = NULL;
      }

      // Dump the exception table as well
      if( n->is_Catch() && (Verbose || WizardMode) ) {
        // Print the exception table for this offset
        _handler_table.print_subtable_for(pc);
      }
    }

    if (pcs && n->_idx < pc_limit)
      tty->print_cr("%3.3x", pcs[n->_idx]);
    else
      tty->print_cr("");

    assert(cut_short || delay == NULL, "no unconditional delay branch");

  } // End of per-block dump
  tty->print_cr("");

  if (cut_short)  tty->print_cr("*** disassembly is cut short ***");
}
#endif

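// Delay-slot note for dump_asm() above: an instruction scheduled into an
// unconditional delay slot is buffered in 'delay' when first seen and only
// printed after its owning branch, so the listing reflects issue order on
// delay-slot architectures such as sparc.
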
//------------------------------Final_Reshape_Counts---------------------------
// This class defines counters to help identify when a method
// may/must be executed using hardware with only 24-bit precision.
struct Final_Reshape_Counts : public StackObj {
  int  _call_count;             // count non-inlined 'common' calls
  int  _float_count;            // count float ops requiring 24-bit precision
  int  _double_count;           // count double ops requiring more precision
  int  _java_call_count;        // count non-inlined 'java' calls
  VectorSet _visited;           // Visitation flags
  Node_List _tests;             // Set of IfNodes & PCTableNodes

  Final_Reshape_Counts() :
    _call_count(0), _float_count(0), _double_count(0), _java_call_count(0),
    _visited( Thread::current()->resource_area() ) { }

  void inc_call_count  () { _call_count  ++; }
  void inc_float_count () { _float_count ++; }
  void inc_double_count() { _double_count++; }
  void inc_java_call_count() { _java_call_count++; }

  int  get_call_count  () const { return _call_count  ; }
  int  get_float_count () const { return _float_count ; }
  int  get_double_count() const { return _double_count; }
  int  get_java_call_count() const { return _java_call_count; }
};

static bool oop_offset_is_sane(const TypeInstPtr* tp) {
  ciInstanceKlass *k = tp->klass()->as_instance_klass();
  // Make sure the offset goes inside the instance layout.
  return k->contains_field_offset(tp->offset());
  // Note that OffsetBot and OffsetTop are very negative.
}

//------------------------------final_graph_reshaping_impl----------------------
// Implement items 1-5 from final_graph_reshaping below.
static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {

  if ( n->outcnt() == 0 ) return; // dead node
  uint nop = n->Opcode();

  // Check for 2-input instruction with "last use" on right input.
  // Swap to left input.  Implements item (2).
  if( n->req() == 3 &&          // two-input instruction
      n->in(1)->outcnt() > 1 && // left use is NOT a last use
      (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
      n->in(2)->outcnt() == 1 &&// right use IS a last use
      !n->in(2)->is_Con() ) {   // right use is not a constant
    // Check for commutative opcode
    switch( nop ) {
    case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
    case Op_MaxI:  case Op_MinI:
    case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
    case Op_AndL:  case Op_XorL:  case Op_OrL:
    case Op_AndI:  case Op_XorI:  case Op_OrI: {
      // Move "last use" input to left by swapping inputs
      n->swap_edges(1, 2);
      break;
    }
    default:
      break;
    }
  }

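  // Rationale for the swap above: on two-address machines the destination
  // typically overwrites the left input, so placing the "last use" operand on
  // the left lets the register allocator clobber it in place instead of
  // inserting a copy.
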
  // Count FPU ops and common calls, implements item (3)
  switch( nop ) {
  // Count all float operations that may use FPU
  case Op_AddF:
  case Op_SubF:
  case Op_MulF:
  case Op_DivF:
  case Op_NegF:
  case Op_ModF:
  case Op_ConvI2F:
  case Op_ConF:
  case Op_CmpF:
  case Op_CmpF3:
  // case Op_ConvL2F: // longs are split into 32-bit halves
    fpu.inc_float_count();
    break;

  case Op_ConvF2D:
  case Op_ConvD2F:
    fpu.inc_float_count();
    fpu.inc_double_count();
    break;

  // Count all double operations that may use FPU
  case Op_AddD:
  case Op_SubD:
  case Op_MulD:
  case Op_DivD:
  case Op_NegD:
  case Op_ModD:
  case Op_ConvI2D:
  case Op_ConvD2I:
  // case Op_ConvL2D: // handled by leaf call
  // case Op_ConvD2L: // handled by leaf call
  case Op_ConD:
  case Op_CmpD:
  case Op_CmpD3:
    fpu.inc_double_count();
    break;
  case Op_Opaque1:              // Remove Opaque Nodes before matching
  case Op_Opaque2:              // Remove Opaque Nodes before matching
    n->subsume_by(n->in(1));
    break;
  case Op_CallStaticJava:
  case Op_CallJava:
  case Op_CallDynamicJava:
    fpu.inc_java_call_count(); // Count java call site
  case Op_CallRuntime:
  case Op_CallLeaf:
  case Op_CallLeafNoFP: {
    assert( n->is_Call(), "" );
    CallNode *call = n->as_Call();
    // Count call sites where the FP mode bit would have to be flipped.
    // Do not count uncommon runtime calls:
    // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
    // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
    if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
      fpu.inc_call_count();   // Count the call site
    } else {                  // See if uncommon argument is shared
      Node *n = call->in(TypeFunc::Parms);
      int nop = n->Opcode();
      // Clone shared simple arguments to uncommon calls, item (1).
      if( n->outcnt() > 1 &&
          !n->is_Proj() &&
          nop != Op_CreateEx &&
          nop != Op_CheckCastPP &&
          nop != Op_DecodeN &&
          !n->is_Mem() ) {
        Node *x = n->clone();
        call->set_req( TypeFunc::Parms, x );
      }
    }
    break;
  }

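  // Note: the three Java call cases above fall through intentionally, so a
  // Java call site is counted both as a java call and as a candidate
  // FP-mode call site in the shared block.
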
  case Op_StoreD:
  case Op_LoadD:
  case Op_LoadD_unaligned:
    fpu.inc_double_count();
    goto handle_mem;
  case Op_StoreF:
  case Op_LoadF:
    fpu.inc_float_count();
    goto handle_mem;

  case Op_StoreB:
  case Op_StoreC:
  case Op_StoreCM:
  case Op_StorePConditional:
  case Op_StoreI:
  case Op_StoreL:
  case Op_StoreLConditional:
  case Op_CompareAndSwapI:
  case Op_CompareAndSwapL:
  case Op_CompareAndSwapP:
  case Op_CompareAndSwapN:
  case Op_StoreP:
  case Op_StoreN:
  case Op_LoadB:
  case Op_LoadC:
  case Op_LoadI:
  case Op_LoadKlass:
  case Op_LoadNKlass:
  case Op_LoadL:
  case Op_LoadL_unaligned:
  case Op_LoadPLocked:
  case Op_LoadLLocked:
  case Op_LoadP:
  case Op_LoadN:
  case Op_LoadRange:
  case Op_LoadS: {
  handle_mem:
#ifdef ASSERT
    if( VerifyOptoOopOffsets ) {
      assert( n->is_Mem(), "" );
      MemNode *mem = (MemNode*)n;
      // Check to see if address types have grounded out somehow.
      const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
      assert( !tp || oop_offset_is_sane(tp), "" );
    }
#endif
    break;
  }

case Op_AddP: { // Assert sane base pointers |
|
608
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2031 |
Node *addp = n->in(AddPNode::Address); |
1 | 2032 |
assert( !addp->is_AddP() || |
2033 |
addp->in(AddPNode::Base)->is_top() || // Top OK for allocation |
|
2034 |
addp->in(AddPNode::Base) == n->in(AddPNode::Base), |
|
2035 |
"Base pointers must match" ); |
|
608
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2036 |
#ifdef _LP64 |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2037 |
if (UseCompressedOops && |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2038 |
addp->Opcode() == Op_ConP && |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2039 |
addp == n->in(AddPNode::Base) && |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2040 |
n->in(AddPNode::Offset)->is_Con()) { |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2041 |
// Use addressing with narrow klass to load with offset on x86. |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2042 |
// On sparc loading 32-bits constant and decoding it have less |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2043 |
// instructions (4) then load 64-bits constant (7). |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2044 |
// Do this transformation here since IGVN will convert ConN back to ConP. |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2045 |
const Type* t = addp->bottom_type(); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2046 |
if (t->isa_oopptr()) { |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2047 |
Node* nn = NULL; |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2048 |
|
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2049 |
// Look for existing ConN node of the same exact type. |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2050 |
Compile* C = Compile::current(); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2051 |
Node* r = C->root(); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2052 |
uint cnt = r->outcnt(); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2053 |
for (uint i = 0; i < cnt; i++) { |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2054 |
Node* m = r->raw_out(i); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2055 |
if (m!= NULL && m->Opcode() == Op_ConN && |
767
64fb1fd7186d
6710487: More than half of JDI Regression tests hang with COOPs in -Xcomp mode
kvn
parents:
608
diff
changeset
|
2056 |
m->bottom_type()->make_ptr() == t) { |
608
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2057 |
nn = m; |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2058 |
break; |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2059 |
} |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2060 |
} |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2061 |
if (nn != NULL) { |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2062 |
// Decode a narrow oop to match address |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2063 |
// [R12 + narrow_oop_reg<<3 + offset] |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2064 |
nn = new (C, 2) DecodeNNode(nn, t); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2065 |
n->set_req(AddPNode::Base, nn); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2066 |
n->set_req(AddPNode::Address, nn); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2067 |
if (addp->outcnt() == 0) { |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2068 |
addp->disconnect_inputs(NULL); |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2069 |
} |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2070 |
} |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2071 |
} |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2072 |
} |
fe8c5fbbc54e
6709093: Compressed Oops: reduce size of compiled methods
kvn
parents:
594
diff
changeset
|
2073 |
#endif |
1 | 2074 |
break; |
2075 |
} |
|
2076 |
||
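  // Note: the loop above walks root()->raw_out() because constant nodes in
  // the ideal graph hang off the root node; reusing an existing ConN of the
  // same type shares one narrow-oop constant instead of minting duplicates.
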
#ifdef _LP64
  case Op_CastPP:
    if (n->in(1)->is_DecodeN() && UseImplicitNullCheckForNarrowOop) {
      Compile* C = Compile::current();
      Node* in1 = n->in(1);
      const Type* t = n->bottom_type();
      Node* new_in1 = in1->clone();
      new_in1->as_DecodeN()->set_type(t);

      if (!Matcher::clone_shift_expressions) {
        //
        // x86, ARM and friends can handle 2 adds in addressing mode
        // and Matcher can fold a DecodeN node into address by using
        // a narrow oop directly and do implicit NULL check in address:
        //
        // [R12 + narrow_oop_reg<<3 + offset]
        // NullCheck narrow_oop_reg
        //
        // On other platforms (Sparc) we have to keep new DecodeN node and
        // use it to do implicit NULL check in address:
        //
        // decode_not_null narrow_oop_reg, base_reg
        // [base_reg + offset]
        // NullCheck base_reg
        //
        // Pin the new DecodeN node to the non-null path on these platforms
        // (Sparc) to record which NULL check the new DecodeN node corresponds
        // to, so it can be used as the value in implicit_null_check().
        //
        new_in1->set_req(0, n->in(0));
      }

      n->subsume_by(new_in1);
      if (in1->outcnt() == 0) {
        in1->disconnect_inputs(NULL);
      }
    }
    break;

590
2954744d7bba
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
589
diff
changeset
|
2116 |
case Op_CmpP: |
594
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
2117 |
// Do this transformation here to preserve CmpPNode::sub() and |
9f4474e5dbaf
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
590
diff
changeset
|
2118 |
// other TypePtr related Ideal optimizations (for example, ptr nullness). |
1135
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2119 |
if (n->in(1)->is_DecodeN() || n->in(2)->is_DecodeN()) { |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2120 |
Node* in1 = n->in(1); |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2121 |
Node* in2 = n->in(2); |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2122 |
if (!in1->is_DecodeN()) { |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2123 |
in2 = in1; |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2124 |
in1 = n->in(2); |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2125 |
} |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2126 |
assert(in1->is_DecodeN(), "sanity"); |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2127 |
|
590
2954744d7bba
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
589
diff
changeset
|
2128 |
Compile* C = Compile::current(); |
1135
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2129 |
Node* new_in2 = NULL; |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2130 |
if (in2->is_DecodeN()) { |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2131 |
new_in2 = in2->in(1); |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2132 |
} else if (in2->Opcode() == Op_ConP) { |
9487203e5789
6706829: Compressed Oops: add debug info for narrow oops
kvn
parents:
1055
diff
changeset
|
2133 |
const Type* t = in2->bottom_type(); |
1129
ec4dfac10759
6741004: UseLargePages + UseCompressedOops breaks implicit null checking guard page
coleenp
parents:
1055
diff
changeset
|
2134 |
if (t == TypePtr::NULL_PTR && UseImplicitNullCheckForNarrowOop) { |
1400
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
2135 |
new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR); |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
2136 |
// |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
2137 |
// This transformation together with CastPP transformation above |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
2138 |
// will generated code for implicit NULL checks for compressed oops. |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
2139 |
// |
afd034bb8c2e
6747051: Improve code and implicit null check generation for compressed oops
kvn
parents:
1399
diff
changeset
|
2140 |
          // The original code after Optimize()
          //
          //    LoadN memory, narrow_oop_reg
          //    decode narrow_oop_reg, base_reg
          //    CmpP base_reg, NULL
          //    CastPP base_reg // NotNull
          //    Load [base_reg + offset], val_reg
          //
          // after these transformations will be
          //
          //    LoadN memory, narrow_oop_reg
          //    CmpN narrow_oop_reg, NULL
          //    decode_not_null narrow_oop_reg, base_reg
          //    Load [base_reg + offset], val_reg
          //
          // and the uncommon path (== NULL) will use narrow_oop_reg directly
          // since narrow oops can be used in debug info now (see the code in
          // final_graph_reshaping_walk()).
          //
          // At the end the code will be matched to
          // on x86:
          //
          //    Load_narrow_oop memory, narrow_oop_reg
          //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
          //    NullCheck narrow_oop_reg
          //
          // and on sparc:
          //
          //    Load_narrow_oop memory, narrow_oop_reg
          //    decode_not_null narrow_oop_reg, base_reg
          //    Load [base_reg + offset], val_reg
          //    NullCheck base_reg
          //
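          // (Illustrative note, added: with compressed oops, a chained Java
          //  dereference such as "v = p.f.g" has exactly the original shape
          //  above: p.f is loaded as a narrow oop (LoadN), decoded,
          //  null-checked (CmpP), cast to not-null (CastPP), and g is then
          //  loaded from the decoded base.)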
        } else if (t->isa_oopptr()) {
          new_in2 = ConNode::make(C, t->make_narrowoop());
        }
      }
      if (new_in2 != NULL) {
        Node* cmpN = new (C, 3) CmpNNode(in1->in(1), new_in2);
        n->subsume_by( cmpN );
        if (in1->outcnt() == 0) {
          in1->disconnect_inputs(NULL);
        }
        if (in2->outcnt() == 0) {
          in2->disconnect_inputs(NULL);
        }
      }
    }
    break;

  case Op_DecodeN:
    assert(!n->in(1)->is_EncodeP(), "should be optimized out");
    break;

  case Op_EncodeP: {
    Node* in1 = n->in(1);
    if (in1->is_DecodeN()) {
      n->subsume_by(in1->in(1));
    } else if (in1->Opcode() == Op_ConP) {
      Compile* C = Compile::current();
      const Type* t = in1->bottom_type();
      if (t == TypePtr::NULL_PTR) {
        n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR));
      } else if (t->isa_oopptr()) {
        n->subsume_by(ConNode::make(C, t->make_narrowoop()));
      }
    }
    if (in1->outcnt() == 0) {
      in1->disconnect_inputs(NULL);
    }
    break;
  }

  case Op_Phi:
    if (n->as_Phi()->bottom_type()->isa_narrowoop()) {
      // The EncodeP optimization may create a Phi with the same edges
      // for all paths. It is not handled well by the Register Allocator.
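      // (Illustrative, added: a Phi(region, x, x, ..., x) whose data inputs
      //  are all the same node x is collapsed to x below.)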
      Node* unique_in = n->in(1);
      assert(unique_in != NULL, "");
      uint cnt = n->req();
      for (uint i = 2; i < cnt; i++) {
        Node* m = n->in(i);
        assert(m != NULL, "");
        if (unique_in != m)
          unique_in = NULL;
      }
      if (unique_in != NULL) {
        n->subsume_by(unique_in);
      }
    }
    break;

#endif

  case Op_ModI:
    if (UseDivMod) {
      // Check if a%b and a/b both exist
      Node* d = n->find_similar(Op_DivI);
      if (d) {
        // Replace them with a fused divmod if supported
        Compile* C = Compile::current();
        if (Matcher::has_match_rule(Op_DivModI)) {
          DivModINode* divmod = DivModINode::make(C, n);
          d->subsume_by(divmod->div_proj());
          n->subsume_by(divmod->mod_proj());
        } else {
          // replace a%b with a-((a/b)*b)
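          // (Worked example, added: with truncating division,
          //  17%5 == 17 - (17/5)*5 == 17 - 15 == 2, and likewise
          //  -17%5 == -17 - (-17/5)*5 == -17 + 15 == -2, so the rewrite
          //  is exact for all operands, including negatives. The identical
          //  identity is used for Op_ModL below.)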
          Node* mult = new (C, 3) MulINode(d, d->in(2));
          Node* sub = new (C, 3) SubINode(d->in(1), mult);
          n->subsume_by( sub );
        }
      }
    }
    break;

  case Op_ModL:
    if (UseDivMod) {
      // Check if a%b and a/b both exist
      Node* d = n->find_similar(Op_DivL);
      if (d) {
        // Replace them with a fused divmod if supported
        Compile* C = Compile::current();
        if (Matcher::has_match_rule(Op_DivModL)) {
          DivModLNode* divmod = DivModLNode::make(C, n);
          d->subsume_by(divmod->div_proj());
          n->subsume_by(divmod->mod_proj());
        } else {
          // replace a%b with a-((a/b)*b)
          Node* mult = new (C, 3) MulLNode(d, d->in(2));
          Node* sub = new (C, 3) SubLNode(d->in(1), mult);
          n->subsume_by( sub );
        }
      }
    }
    break;

  case Op_Load16B:
  case Op_Load8B:
  case Op_Load4B:
  case Op_Load8S:
  case Op_Load4S:
  case Op_Load2S:
  case Op_Load8C:
  case Op_Load4C:
  case Op_Load2C:
  case Op_Load4I:
  case Op_Load2I:
  case Op_Load2L:
  case Op_Load4F:
  case Op_Load2F:
  case Op_Load2D:
  case Op_Store16B:
  case Op_Store8B:
  case Op_Store4B:
  case Op_Store8C:
  case Op_Store4C:
  case Op_Store2C:
  case Op_Store4I:
  case Op_Store2I:
  case Op_Store2L:
  case Op_Store4F:
  case Op_Store2F:
  case Op_Store2D:
    break;

  case Op_PackB:
  case Op_PackS:
  case Op_PackC:
  case Op_PackI:
  case Op_PackF:
  case Op_PackL:
  case Op_PackD:
    if (n->req()-1 > 2) {
      // Replace many-operand PackNodes with a binary tree for matching
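      // (Illustrative, added: for example an 8-operand PackB(a,b,...,h)
      //  becomes a balanced tree of two-operand packs,
      //  (((a,b),(c,d)),((e,f),(g,h))), which the matcher can tile with
      //  fixed-arity vector instructions.)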
      PackNode* p = (PackNode*) n;
      Node* btp = p->binaryTreePack(Compile::current(), 1, n->req());
      n->subsume_by(btp);
    }
    break;
  default:
    assert( !n->is_Call(), "" );
    assert( !n->is_Mem(), "" );
    break;
  }

  // Collect CFG split points
  if (n->is_MultiBranch())
    fpu._tests.push(n);
}

//------------------------------final_graph_reshaping_walk---------------------
// Replacing Opaque nodes with their input in final_graph_reshaping_impl()
// requires that the walk visits a node's inputs before visiting the node.
static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &fpu ) {
  ResourceArea *area = Thread::current()->resource_area();
  Unique_Node_List sfpt(area);

  fpu._visited.set(root->_idx); // first, mark node as visited
  uint cnt = root->req();
  Node *n = root;
  uint i = 0;
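  // (Descriptive note, added: this is an iterative post-order DFS with an
  //  explicit stack; 'n' is the node being visited, 'i' indexes its next
  //  unexamined input, and nstack holds (parent, next-input-index) pairs,
  //  so final_graph_reshaping_impl() runs only after all of a node's
  //  inputs have been visited.)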
  while (true) {
    if (i < cnt) {
      // Place all non-visited non-null inputs onto stack
      Node* m = n->in(i);
      ++i;
      if (m != NULL && !fpu._visited.test_set(m->_idx)) {
        if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
          sfpt.push(m);
        cnt = m->req();
        nstack.push(n, i); // put on stack parent and next input's index
        n = m;
        i = 0;
      }
    } else {
      // Now do post-visit work
      final_graph_reshaping_impl( n, fpu );
      if (nstack.is_empty())
        break;             // finished
      n = nstack.node();   // Get node from stack
      cnt = n->req();
      i = nstack.index();
      nstack.pop();        // Shift to the next node on stack
    }
  }

  // Go over safepoint nodes to skip DecodeN nodes for debug edges.
  // This could be done for uncommon traps or for any safepoints/calls
  // if the DecodeN node is referenced only in debug info.
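  // (Clarifying note, added: debug info can reference the narrow oop value
  //  directly and deoptimization rebuilds the full oop from it, so a DecodeN
  //  that feeds only the debug edges of a safepoint is redundant there.)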
  while (sfpt.size() > 0) {
    n = sfpt.pop();
    JVMState *jvms = n->as_SafePoint()->jvms();
    assert(jvms != NULL, "sanity");
    int start = jvms->debug_start();
    int end   = n->req();
    bool is_uncommon = (n->is_CallStaticJava() &&
                        n->as_CallStaticJava()->uncommon_trap_request() != 0);
    for (int j = start; j < end; j++) {
      Node* in = n->in(j);
      if (in->is_DecodeN()) {
        bool safe_to_skip = true;
        if (!is_uncommon) {
          // Is it safe to skip?
          for (uint i = 0; i < in->outcnt(); i++) {
            Node* u = in->raw_out(i);
            if (!u->is_SafePoint() ||
                (u->is_Call() && u->as_Call()->has_non_debug_use(n))) {
              safe_to_skip = false;
            }
          }
        }
        if (safe_to_skip) {
          n->set_req(j, in->in(1));
        }
        if (in->outcnt() == 0) {
          in->disconnect_inputs(NULL);
        }
      }
    }
  }
}

//------------------------------final_graph_reshaping--------------------------
// Final Graph Reshaping.
//
// (1) Clone simple inputs to uncommon calls, so they can be scheduled late
//     and not commoned up and forced early.  Must come after regular
//     optimizations to avoid GVN undoing the cloning.  Clone constant
//     inputs to Loop Phis; these will be split by the allocator anyway.
//     Remove Opaque nodes.
// (2) Move last-uses by commutative operations to the left input to encourage
//     Intel update-in-place two-address operations and better register usage
//     on RISCs.  Must come after regular optimizations to avoid GVN Ideal
//     calls canonicalizing them back.
// (3) Count the number of double-precision FP ops, single-precision FP ops
//     and call sites.  On Intel, we can get correct rounding either by
//     forcing singles to memory (requires extra stores and loads after each
//     FP bytecode) or by setting a rounding mode bit (requires setting and
//     clearing the mode bit around call sites).  The mode bit is only used
//     if the relative frequency of single FP ops to calls is low enough.
//     This is a key transform for SPEC mpeg_audio.
// (4) Detect infinite loops; blobs of code reachable from above but not
//     below.  Several of the Code_Gen algorithms fail on such code shapes,
//     so we simply bail out.  Happens a lot in ZKM.jar, but also happens
//     from time to time in other codes (such as -Xcomp finalizer loops, etc).
//     Detection is by looking for IfNodes where only one projection is
//     reachable from below, or for CatchNodes missing some targets.
// (5) Assert for insane oop offsets in debug mode.

bool Compile::final_graph_reshaping() {
  // an infinite loop may have been eliminated by the optimizer,
  // in which case the graph will be empty.
  if (root()->req() == 1) {
    record_method_not_compilable("trivial infinite loop");
    return true;
  }

  Final_Reshape_Counts fpu;

  // Visit everybody reachable!
  // Allocate stack of size C->unique()/2 to avoid frequent realloc
  Node_Stack nstack(unique() >> 1);
  final_graph_reshaping_walk(nstack, root(), fpu);

  // Check for unreachable (from below) code (i.e., infinite loops).
  for( uint i = 0; i < fpu._tests.size(); i++ ) {
    MultiBranchNode *n = fpu._tests[i]->as_MultiBranch();
    // Get number of CFG targets.
    // Note that PCTables include exception targets after calls.
    uint required_outcnt = n->required_outcnt();
    if (n->outcnt() != required_outcnt) {
      // Check for a few special cases.  Rethrow Nodes never take the
      // 'fall-thru' path, so expected kids is 1 less.
      if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
        if (n->in(0)->in(0)->is_Call()) {
          CallNode *call = n->in(0)->in(0)->as_Call();
          if (call->entry_point() == OptoRuntime::rethrow_stub()) {
            required_outcnt--;      // Rethrow always has 1 less kid
          } else if (call->req() > TypeFunc::Parms &&
                     call->is_CallDynamicJava()) {
            // Check for a null receiver. In such a case, the optimizer has
            // detected that the virtual call will always result in a null
            // pointer exception. The fall-through projection of this CatchNode
            // will not be populated.
            Node *arg0 = call->in(TypeFunc::Parms);
            if (arg0->is_Type() &&
                arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
              required_outcnt--;
            }
          } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
                     call->req() > TypeFunc::Parms+1 &&
                     call->is_CallStaticJava()) {
            // Check for a negative array length. In such a case, the optimizer
            // has detected that the allocation attempt will always result in
            // an exception. There is no fall-through projection of this
            // CatchNode.
            Node *arg1 = call->in(TypeFunc::Parms+1);
            if (arg1->is_Type() &&
                arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
              required_outcnt--;
            }
          }
        }
      }
      // Recheck with a better notion of 'required_outcnt'
      if (n->outcnt() != required_outcnt) {
        record_method_not_compilable("malformed control flow");
        return true;            // Not all targets reachable!
      }
    }
    // Check that I actually visited all kids.  Unreached kids
    // must be infinite loops.
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
      if (!fpu._visited.test(n->fast_out(j)->_idx)) {
        record_method_not_compilable("infinite loop");
        return true;            // Found unvisited kid; must be unreachable
      }
  }

  // If the original bytecodes contained a mixture of floats and doubles,
  // check if the optimizer has made it homogeneous, item (3).
  if( Use24BitFPMode && Use24BitFP &&
      fpu.get_float_count() > 32 &&
      fpu.get_double_count() == 0 &&
      (10 * fpu.get_call_count() < fpu.get_float_count()) ) {
    set_24_bit_selection_and_mode( false, true );
  }

  set_has_java_calls(fpu.get_java_call_count() > 0);

  // No infinite loops, no reason to bail out.
  return false;
}

//-----------------------------too_many_traps----------------------------------
// Report if there are too many traps at the current method and bci.
// Return true if there was a trap, and/or if PerMethodTrapLimit is exceeded.
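// (Worked example, illustrative values: if PerMethodTrapLimit is 100, then
// once the cumulative trap_count(reason) for this compilation reaches 100,
// the global check below answers true even for a bci with no recorded trap.)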
bool Compile::too_many_traps(ciMethod* method,
                             int bci,
                             Deoptimization::DeoptReason reason) {
  ciMethodData* md = method->method_data();
  if (md->is_empty()) {
    // Assume the trap has not occurred, or that it occurred only
    // because of a transient condition during start-up in the interpreter.
    return false;
  }
  if (md->has_trap_at(bci, reason) != 0) {
    // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
    // Also, if there are multiple reasons, or if there is no per-BCI record,
    // assume the worst.
    if (log())
      log()->elem("observe trap='%s' count='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md->trap_count(reason));
    return true;
  } else {
    // Ignore method/bci and see if there have been too many globally.
    return too_many_traps(reason, md);
  }
}

// Less-accurate variant which does not require a method and bci.
bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
                             ciMethodData* logmd) {
  if (trap_count(reason) >= (uint)PerMethodTrapLimit) {
    // Too many traps globally.
    // Note that we use cumulative trap_count, not just md->trap_count.
    if (log()) {
      int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
      log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
                  Deoptimization::trap_reason_name(reason),
                  mcount, trap_count(reason));
    }
    return true;
  } else {
    // The coast is clear.
    return false;
  }
}

//--------------------------too_many_recompiles--------------------------------
// Report if there are too many recompiles at the current method and bci.
// Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
// Is not eager to return true, since this will cause the compiler to use
// Action_none for a trap point, to avoid too many recompilations.
bool Compile::too_many_recompiles(ciMethod* method,
                                  int bci,
                                  Deoptimization::DeoptReason reason) {
  ciMethodData* md = method->method_data();
  if (md->is_empty()) {
    // Assume the trap has not occurred, or that it occurred only
    // because of a transient condition during start-up in the interpreter.
    return false;
  }
  // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
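  // (Worked example, illustrative values: with PerBytecodeRecompilationCutoff
  //  = 200 and PerMethodRecompilationCutoff = 400, bc_cutoff is 200/8 == 25
  //  recompiles blamed on one bci, and m_cutoff is 400/2 + 1 == 201 decompiles
  //  for the whole method, before this heuristic reports "too many".)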
  uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
  uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
  Deoptimization::DeoptReason per_bc_reason
    = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
  if ((per_bc_reason == Deoptimization::Reason_none
       || md->has_trap_at(bci, reason) != 0)
      // The trap frequency measure we care about is the recompile count:
      && md->trap_recompiled_at(bci)
      && md->overflow_recompile_count() >= bc_cutoff) {
    // Do not emit a trap here if it has already caused recompilations.
    // Also, if there are multiple reasons, or if there is no per-BCI record,
    // assume the worst.
    if (log())
      log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md->trap_count(reason),
                  md->overflow_recompile_count());
    return true;
  } else if (trap_count(reason) != 0
             && decompile_count() >= m_cutoff) {
    // Too many recompiles globally, and we have seen this sort of trap.
    // Use cumulative decompile_count, not just md->decompile_count.
    if (log())
      log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md->trap_count(reason), trap_count(reason),
                  md->decompile_count(), decompile_count());
    return true;
  } else {
    // The coast is clear.
    return false;
  }
}


#ifndef PRODUCT
//------------------------------verify_graph_edges---------------------------
// Walk the Graph and verify that there is a one-to-one correspondence
// between Use-Def edges and Def-Use edges in the graph.
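// (Clarifying note, added: "one-to-one" means every input edge u->in(k) == d
// must be mirrored by an output entry d->raw_out(i) == u, and vice versa.)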
void Compile::verify_graph_edges(bool no_dead_code) {
  if (VerifyGraphEdges) {
    ResourceArea *area = Thread::current()->resource_area();
    Unique_Node_List visited(area);
    // Call recursive graph walk to check edges
    _root->verify_edges(visited);
    if (no_dead_code) {
      // Now make sure that no visited node is used by an unvisited node.
      int dead_nodes = 0;  // was 'bool'; this is a count, and ++ on bool is deprecated
      Unique_Node_List checked(area);
      while (visited.size() > 0) {
        Node* n = visited.pop();
        checked.push(n);
        for (uint i = 0; i < n->outcnt(); i++) {
          Node* use = n->raw_out(i);
          if (checked.member(use))  continue;  // already checked
          if (visited.member(use))  continue;  // already in the graph
          if (use->is_Con())        continue;  // a dead ConNode is OK
          // At this point, we have found a dead node which is DU-reachable.
          if (dead_nodes++ == 0)
            tty->print_cr("*** Dead nodes reachable via DU edges:");
          use->dump(2);
          tty->print_cr("---");
          checked.push(use);  // No repeats; pretend it is now checked.
        }
      }
      assert(dead_nodes == 0, "using nodes must be reachable from root");
    }
  }
}
#endif

// The Compile object keeps track of failure reasons separately from the ciEnv.
// This is required because there is not quite a 1-1 relation between the
// ciEnv and its compilation task and the Compile object. Note that one
// ciEnv might use two Compile objects, if C2Compiler::compile_method decides
// to backtrack and retry without subsuming loads. Other than this backtracking
// behavior, the Compile's failure reason is quietly copied up to the ciEnv
// by the logic in C2Compiler.
void Compile::record_failure(const char* reason) {
  if (log() != NULL) {
    log()->elem("failure reason='%s' phase='compile'", reason);
  }
  if (_failure_reason == NULL) {
    // Record the first failure reason.
    _failure_reason = reason;
  }
  if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
    C->print_method(_failure_reason);
  }
  _root = NULL;  // flush the graph, too
}

Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
  : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false)
{
  if (dolog) {
    C = Compile::current();
    _log = C->log();
  } else {
    C = NULL;
    _log = NULL;
  }
  if (_log != NULL) {
    _log->begin_head("phase name='%s' nodes='%d'", name, C->unique());
    _log->stamp();
    _log->end_head();
  }
}

Compile::TracePhase::~TracePhase() {
  if (_log != NULL) {
    _log->done("phase nodes='%d'", C->unique());
  }
}