/*
 * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_GraphBuilder.cpp.incl"

class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
 private:
  Compilation* _compilation;
  IRScope*     _scope;

  BlockList    _blocks;            // internal list of all blocks
  BlockList*   _bci2block;         // mapping from bci to blocks for GraphBuilder

  // fields used by mark_loops
  BitMap       _active;            // for iteration of control flow graph
  BitMap       _visited;           // for iteration of control flow graph
  intArray     _loop_map;          // caches the information if a block is contained in a loop
  int          _next_loop_index;   // next free loop number
  int          _next_block_number; // for reverse postorder numbering of blocks

  // accessors
  Compilation* compilation() const { return _compilation; }
  IRScope*     scope() const       { return _scope; }
  ciMethod*    method() const      { return scope()->method(); }
  XHandlers*   xhandlers() const   { return scope()->xhandlers(); }

  // unified bailout support
  void bailout(const char* msg) const { compilation()->bailout(msg); }
  bool bailed_out() const             { return compilation()->bailed_out(); }

  // helper functions
  BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
  void handle_exceptions(BlockBegin* current, int cur_bci);
  void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
  void store_one(BlockBegin* current, int local);
  void store_two(BlockBegin* current, int local);
  void set_entries(int osr_bci);
  void set_leaders();

  void make_loop_header(BlockBegin* block);
  void mark_loops();
  int  mark_loops(BlockBegin* b, bool in_subroutine);

  // debugging
#ifndef PRODUCT
  void print();
#endif

 public:
  // creation
  BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);

  // accessors for GraphBuilder
  BlockList* bci2block() const { return _bci2block; }
};


// Implementation of BlockListBuilder

BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
 : _compilation(compilation)
 , _scope(scope)
 , _blocks(16)
 , _bci2block(new BlockList(scope->method()->code_size(), NULL))
 , _next_block_number(0)
 , _active()         // size not known yet
 , _visited()        // size not known yet
 , _next_loop_index(0)
 , _loop_map()       // size not known yet
{
  set_entries(osr_bci);
  set_leaders();
  CHECK_BAILOUT();

  mark_loops();
  NOT_PRODUCT(if (PrintInitialBlockList) print());

#ifndef PRODUCT
  if (PrintCFGToFile) {
    stringStream title;
    title.print("BlockListBuilder ");
    scope->method()->print_name(&title);
    CFGPrinter::print_cfg(_bci2block, title.as_string(), false, false);
  }
#endif
}


void BlockListBuilder::set_entries(int osr_bci) {
  // generate start blocks
  BlockBegin* std_entry = make_block_at(0, NULL);
  if (scope()->caller() == NULL) {
    std_entry->set(BlockBegin::std_entry_flag);
  }
  if (osr_bci != -1) {
    BlockBegin* osr_entry = make_block_at(osr_bci, NULL);
    osr_entry->set(BlockBegin::osr_entry_flag);
  }

  // generate exception entry blocks
  XHandlers* list = xhandlers();
  const int n = list->length();
  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);
    BlockBegin* entry = make_block_at(h->handler_bci(), NULL);
    entry->set(BlockBegin::exception_entry_flag);
    h->set_entry_block(entry);
  }
}


BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
  assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");

  BlockBegin* block = _bci2block->at(cur_bci);
  if (block == NULL) {
    block = new BlockBegin(cur_bci);
    block->init_stores_to_locals(method()->max_locals());
    _bci2block->at_put(cur_bci, block);
    _blocks.append(block);

    assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
  }

  if (predecessor != NULL) {
    if (block->is_set(BlockBegin::exception_entry_flag)) {
      BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
    }

    predecessor->add_successor(block);
    block->increment_total_preds();
  }

  return block;
}


inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
  current->stores_to_locals().set_bit(local);
}
inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
  store_one(current, local);
  store_one(current, local + 1);
}


void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
  // Draws edges from a block to its exception handlers
  XHandlers* list = xhandlers();
  const int n = list->length();

  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);

    if (h->covers(cur_bci)) {
      BlockBegin* entry = h->entry_block();
      assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set");
      assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");

      // add each exception handler only once
      if (!current->is_successor(entry)) {
        current->add_successor(entry);
        entry->increment_total_preds();
      }

      // stop when reaching catchall
      if (h->catch_type() == 0) break;
    }
  }
}

void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
  // start a new block after the jsr-bytecode and link this block into the cfg
  make_block_at(next_bci, current);

  // start a new block at the subroutine entry and mark it with a special flag
  BlockBegin* sr_block = make_block_at(sr_bci, current);
  if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
    sr_block->set(BlockBegin::subroutine_entry_flag);
  }
}

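// Scans the bytecodes once: at every bci flagged as a block start a block is
// created (or reused), successor edges for branches, switches, jsr/ret and
// exception handlers are recorded, and the locals stored to in each block are
// collected for the later, selective creation of phi functions.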
void BlockListBuilder::set_leaders() {
  bool has_xhandlers = xhandlers()->has_handlers();
  BlockBegin* current = NULL;

  // The information which bci starts a new block simplifies the analysis
  // Without it, backward branches could jump to a bci where no block was created
  // during bytecode iteration. This would require the creation of a new block at the
  // branch target and a modification of the successor lists.
  BitMap bci_block_start = method()->bci_block_start();

  ciBytecodeStream s(method());
  while (s.next() != ciBytecodeStream::EOBC()) {
    int cur_bci = s.cur_bci();

    if (bci_block_start.at(cur_bci)) {
      current = make_block_at(cur_bci, current);
    }
    assert(current != NULL, "must have current block");

    if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
      handle_exceptions(current, cur_bci);
    }

    switch (s.cur_bc()) {
      // track stores to local variables for selective creation of phi functions
      case Bytecodes::_iinc:     store_one(current, s.get_index()); break;
      case Bytecodes::_istore:   store_one(current, s.get_index()); break;
      case Bytecodes::_lstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_fstore:   store_one(current, s.get_index()); break;
      case Bytecodes::_dstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_astore:   store_one(current, s.get_index()); break;
      case Bytecodes::_istore_0: store_one(current, 0); break;
      case Bytecodes::_istore_1: store_one(current, 1); break;
      case Bytecodes::_istore_2: store_one(current, 2); break;
      case Bytecodes::_istore_3: store_one(current, 3); break;
      case Bytecodes::_lstore_0: store_two(current, 0); break;
      case Bytecodes::_lstore_1: store_two(current, 1); break;
      case Bytecodes::_lstore_2: store_two(current, 2); break;
      case Bytecodes::_lstore_3: store_two(current, 3); break;
      case Bytecodes::_fstore_0: store_one(current, 0); break;
      case Bytecodes::_fstore_1: store_one(current, 1); break;
      case Bytecodes::_fstore_2: store_one(current, 2); break;
      case Bytecodes::_fstore_3: store_one(current, 3); break;
      case Bytecodes::_dstore_0: store_two(current, 0); break;
      case Bytecodes::_dstore_1: store_two(current, 1); break;
      case Bytecodes::_dstore_2: store_two(current, 2); break;
      case Bytecodes::_dstore_3: store_two(current, 3); break;
      case Bytecodes::_astore_0: store_one(current, 0); break;
      case Bytecodes::_astore_1: store_one(current, 1); break;
      case Bytecodes::_astore_2: store_one(current, 2); break;
      case Bytecodes::_astore_3: store_one(current, 3); break;

      // track bytecodes that affect the control flow
      case Bytecodes::_athrow:   // fall through
      case Bytecodes::_ret:      // fall through
      case Bytecodes::_ireturn:  // fall through
      case Bytecodes::_lreturn:  // fall through
      case Bytecodes::_freturn:  // fall through
      case Bytecodes::_dreturn:  // fall through
      case Bytecodes::_areturn:  // fall through
      case Bytecodes::_return:
        current = NULL;
        break;

      case Bytecodes::_ifeq:      // fall through
      case Bytecodes::_ifne:      // fall through
      case Bytecodes::_iflt:      // fall through
      case Bytecodes::_ifge:      // fall through
      case Bytecodes::_ifgt:      // fall through
      case Bytecodes::_ifle:      // fall through
      case Bytecodes::_if_icmpeq: // fall through
      case Bytecodes::_if_icmpne: // fall through
      case Bytecodes::_if_icmplt: // fall through
      case Bytecodes::_if_icmpge: // fall through
      case Bytecodes::_if_icmpgt: // fall through
      case Bytecodes::_if_icmple: // fall through
      case Bytecodes::_if_acmpeq: // fall through
      case Bytecodes::_if_acmpne: // fall through
      case Bytecodes::_ifnull:    // fall through
      case Bytecodes::_ifnonnull:
        make_block_at(s.next_bci(), current);
        make_block_at(s.get_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_goto:
        make_block_at(s.get_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_goto_w:
        make_block_at(s.get_far_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_jsr:
        handle_jsr(current, s.get_dest(), s.next_bci());
        current = NULL;
        break;

      case Bytecodes::_jsr_w:
        handle_jsr(current, s.get_far_dest(), s.next_bci());
        current = NULL;
        break;

      case Bytecodes::_tableswitch: {
        // set block for each case
        Bytecode_tableswitch *switch_ = Bytecode_tableswitch_at(s.cur_bcp());
        int l = switch_->length();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + switch_->dest_offset_at(i), current);
        }
        make_block_at(cur_bci + switch_->default_offset(), current);
        current = NULL;
        break;
      }

      case Bytecodes::_lookupswitch: {
        // set block for each case
        Bytecode_lookupswitch *switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
        int l = switch_->number_of_pairs();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + switch_->pair_at(i)->offset(), current);
        }
        make_block_at(cur_bci + switch_->default_offset(), current);
        current = NULL;
        break;
      }
    }
  }
}


void BlockListBuilder::mark_loops() {
  ResourceMark rm;

  _active = BitMap(BlockBegin::number_of_blocks());  _active.clear();
  _visited = BitMap(BlockBegin::number_of_blocks()); _visited.clear();
  _loop_map = intArray(BlockBegin::number_of_blocks(), 0);
  _next_loop_index = 0;
  _next_block_number = _blocks.length();

  // recursively iterate the control flow graph
  mark_loops(_bci2block->at(0), false);
  assert(_next_block_number >= 0, "invalid block numbers");
}

void BlockListBuilder::make_loop_header(BlockBegin* block) {
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    // exception edges may look like loops but don't mark them as such
    // since it screws up block ordering.
    return;
  }
  if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
    block->set(BlockBegin::parser_loop_header_flag);

    assert(_loop_map.at(block->block_id()) == 0, "must not be set yet");
    assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer");
    _loop_map.at_put(block->block_id(), 1 << _next_loop_index);
    if (_next_loop_index < 31) _next_loop_index++;
  } else {
    // block already marked as loop header
    assert(is_power_of_2(_loop_map.at(block->block_id())), "exactly one bit must be set");
  }
}

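// Depth-first traversal of the control flow graph. Every loop header found
// via a backward branch is assigned its own bit in _loop_map (bit 31 is
// shared by the 32nd and all later loops); a block's loop state is the union
// of its successors' states, and a header's own bit is cleared once the
// traversal leaves that header (except for the shared bit 31, which is never
// cleared).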
int BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
  int block_id = block->block_id();

  if (_visited.at(block_id)) {
    if (_active.at(block_id)) {
      // reached block via backward branch
      make_loop_header(block);
    }
    // return cached loop information for this block
    return _loop_map.at(block_id);
  }

  if (block->is_set(BlockBegin::subroutine_entry_flag)) {
    in_subroutine = true;
  }

  // set active and visited bits before successors are processed
  _visited.set_bit(block_id);
  _active.set_bit(block_id);

  intptr_t loop_state = 0;
  for (int i = block->number_of_sux() - 1; i >= 0; i--) {
    // recursively process all successors
    loop_state |= mark_loops(block->sux_at(i), in_subroutine);
  }

  // clear active-bit after all successors are processed
  _active.clear_bit(block_id);

  // reverse-post-order numbering of all blocks
  block->set_depth_first_number(_next_block_number);
  _next_block_number--;

  if (loop_state != 0 || in_subroutine) {
    // block is contained in at least one loop, so phi functions are necessary
    // phi functions are also necessary for all locals stored in a subroutine
    scope()->requires_phi_function().set_union(block->stores_to_locals());
  }

  if (block->is_set(BlockBegin::parser_loop_header_flag)) {
    int header_loop_state = _loop_map.at(block_id);
    assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set");

    // If the highest bit is set (i.e. when the integer value is negative), the method
    // has 32 or more loops. This bit is never cleared because it is used for multiple loops
    if (header_loop_state >= 0) {
      clear_bits(loop_state, header_loop_state);
    }
  }

  // cache and return loop information for this block
  _loop_map.at_put(block_id, loop_state);
  return loop_state;
}


#ifndef PRODUCT

int compare_depth_first(BlockBegin** a, BlockBegin** b) {
  return (*a)->depth_first_number() - (*b)->depth_first_number();
}

void BlockListBuilder::print() {
  tty->print("----- initial block list of BlockListBuilder for method ");
  method()->print_short_name();
  tty->cr();

  // better readability if blocks are sorted in processing order
  _blocks.sort(compare_depth_first);

  for (int i = 0; i < _blocks.length(); i++) {
    BlockBegin* cur = _blocks.at(i);
    tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());

    tty->print(cur->is_set(BlockBegin::std_entry_flag) ? " std" : " ");
    tty->print(cur->is_set(BlockBegin::osr_entry_flag) ? " osr" : " ");
    tty->print(cur->is_set(BlockBegin::exception_entry_flag) ? " ex" : " ");
    tty->print(cur->is_set(BlockBegin::subroutine_entry_flag) ? " sr" : " ");
    tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh" : " ");

    if (cur->number_of_sux() > 0) {
      tty->print(" sux: ");
      for (int j = 0; j < cur->number_of_sux(); j++) {
        BlockBegin* sux = cur->sux_at(j);
        tty->print("B%d ", sux->block_id());
      }
    }
    tty->cr();
  }
}

#endif


// A simple growable array of Values indexed by ciFields
class FieldBuffer: public CompilationResourceObj {
 private:
  GrowableArray<Value> _values;

 public:
  FieldBuffer() {}

  void kill() {
    _values.trunc_to(0);
  }

  Value at(ciField* field) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset();
    if (offset < _values.length()) {
      return _values.at(offset);
    } else {
      return NULL;
    }
  }

  void at_put(ciField* field, Value value) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset();
    _values.at_put_grow(offset, value, NULL);
  }

};


// MemoryBuffer is a fairly simple model of the current state of memory.
// It partitions memory into several pieces. The first piece is
// generic memory where little is known about the owner of the memory.
// This is conceptually represented by the tuple <O, F, V> which says
// that the field F of object O has value V. This is flattened so
// that F is represented by the offset of the field and the parallel
// arrays _objects and _values are used for O and V. Loads of O.F can
// simply use V. Newly allocated objects are kept in a separate list
// along with a parallel array for each object which represents the
// current value of its fields. Stores of the default value to fields
// which have never been stored to before are eliminated since they
// are redundant. Once newly allocated objects are stored into
// another object or they are passed out of the current compile they
// are treated like generic memory.

class MemoryBuffer: public CompilationResourceObj {
 private:
  FieldBuffer                 _values;
  GrowableArray<Value>        _objects;
  GrowableArray<Value>        _newobjects;
  GrowableArray<FieldBuffer*> _fields;

 public:
  MemoryBuffer() {}

  StoreField* store(StoreField* st) {
    if (!EliminateFieldAccess) {
      return st;
    }

    Value object = st->obj();
    Value value = st->value();
    ciField* field = st->field();
    if (field->holder()->is_loaded()) {
      int offset = field->offset();
      int index = _newobjects.find(object);
      if (index != -1) {
        // newly allocated object with no other stores performed on this field
        FieldBuffer* buf = _fields.at(index);
        if (buf->at(field) == NULL && is_default_value(value)) {
#ifndef PRODUCT
          if (PrintIRDuringConstruction && Verbose) {
            tty->print_cr("Eliminated store for object %d:", index);
            st->print_line();
          }
#endif
          return NULL;
        } else {
          buf->at_put(field, value);
        }
      } else {
        _objects.at_put_grow(offset, object, NULL);
        _values.at_put(field, value);
      }

      store_value(value);
    } else {
      // if we held onto field names we could alias based on names but
      // we don't know what's being stored to so kill it all.
      kill();
    }
    return st;
  }


  // return true if this value corresponds to the default value of a field.
  bool is_default_value(Value value) {
    Constant* con = value->as_Constant();
    if (con) {
      switch (con->type()->tag()) {
        case intTag:    return con->type()->as_IntConstant()->value() == 0;
        case longTag:   return con->type()->as_LongConstant()->value() == 0;
        case floatTag:  return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
        case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
        case objectTag: return con->type() == objectNull;
        default:        ShouldNotReachHere();
      }
    }
    return false;
  }


  // return either the actual value of a load or the load itself
  Value load(LoadField* load) {
    if (!EliminateFieldAccess) {
      return load;
    }

    if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) {
      // can't skip load since value might get rounded as a side effect
      return load;
    }

    ciField* field = load->field();
    Value object = load->obj();
    if (field->holder()->is_loaded() && !field->is_volatile()) {
      int offset = field->offset();
      Value result = NULL;
      int index = _newobjects.find(object);
      if (index != -1) {
        result = _fields.at(index)->at(field);
      } else if (_objects.at_grow(offset, NULL) == object) {
        result = _values.at(field);
      }
      if (result != NULL) {
#ifndef PRODUCT
        if (PrintIRDuringConstruction && Verbose) {
          tty->print_cr("Eliminated load: ");
          load->print_line();
        }
#endif
        assert(result->type()->tag() == load->type()->tag(), "wrong types");
        return result;
      }
    }
    return load;
  }

  // Record this newly allocated object
  void new_instance(NewInstance* object) {
    int index = _newobjects.length();
    _newobjects.append(object);
    if (_fields.at_grow(index, NULL) == NULL) {
      _fields.at_put(index, new FieldBuffer());
    } else {
      _fields.at(index)->kill();
    }
  }

  void store_value(Value value) {
    int index = _newobjects.find(value);
    if (index != -1) {
      // stored a newly allocated object into another object.
      // Assume we've lost track of it as a separate slice of memory.
      // We could do better by keeping track of whether individual
      // fields could alias each other.
      _newobjects.remove_at(index);
      // pull out the field info and store it at the end of the
      // field info list to be reused later.
      _fields.append(_fields.at(index));
      _fields.remove_at(index);
    }
  }

  void kill() {
    _newobjects.trunc_to(0);
    _objects.trunc_to(0);
    _values.kill();
  }
};

// Implementation of GraphBuilder's ScopeData

GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
  : _parent(parent)
  , _bci2block(NULL)
  , _scope(NULL)
  , _has_handler(false)
  , _stream(NULL)
  , _work_list(NULL)
  , _parsing_jsr(false)
  , _jsr_xhandlers(NULL)
  , _caller_stack_size(-1)
  , _continuation(NULL)
  , _continuation_state(NULL)
  , _num_returns(0)
  , _cleanup_block(NULL)
  , _cleanup_return_prev(NULL)
  , _cleanup_state(NULL)
{
  if (parent != NULL) {
    _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
  } else {
    _max_inline_size = MaxInlineSize;
  }
  if (_max_inline_size < MaxTrivialSize) {
    _max_inline_size = MaxTrivialSize;
  }
}


void GraphBuilder::kill_all() {
  if (UseLocalValueNumbering) {
    vmap()->kill_all();
  }
  _memory->kill();
}


BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
  if (parsing_jsr()) {
    // It is necessary to clone all blocks associated with a
    // subroutine, including those for exception handlers in the scope
    // of the method containing the jsr (because those exception
    // handlers may contain ret instructions in some cases).
    BlockBegin* block = bci2block()->at(bci);
    if (block != NULL && block == parent()->bci2block()->at(bci)) {
      BlockBegin* new_block = new BlockBegin(block->bci());
#ifndef PRODUCT
      if (PrintInitialBlockList) {
        tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
                      block->block_id(), block->bci(), new_block->block_id());
      }
#endif
      // copy data from the cloned block
      new_block->set_depth_first_number(block->depth_first_number());
      if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
      // Preserve certain flags for assertion checking
      if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
      if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);

      // copy was_visited_flag to allow early detection of bailouts
      // if a block that is used in a jsr has already been visited before,
      // it is shared between the normal control flow and a subroutine
      // BlockBegin::try_merge returns false when the flag is set, this leads
      // to a compilation bailout
      if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);

      bci2block()->at_put(bci, new_block);
      block = new_block;
    }
    return block;
  } else {
    return bci2block()->at(bci);
  }
}


XHandlers* GraphBuilder::ScopeData::xhandlers() const {
  if (_jsr_xhandlers == NULL) {
    assert(!parsing_jsr(), "");
    return scope()->xhandlers();
  }
  assert(parsing_jsr(), "");
  return _jsr_xhandlers;
}


void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
  _scope = scope;
  bool parent_has_handler = false;
  if (parent() != NULL) {
    parent_has_handler = parent()->has_handler();
  }
  _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
}


void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
                                                      Instruction* return_prev,
                                                      ValueStack* return_state) {
  _cleanup_block       = block;
  _cleanup_return_prev = return_prev;
  _cleanup_state       = return_state;
}


void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
  if (_work_list == NULL) {
    _work_list = new BlockList();
  }

  if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
    // Do not start parsing the continuation block while in a
    // sub-scope
    if (parsing_jsr()) {
      if (block == jsr_continuation()) {
        return;
      }
    } else {
      if (block == continuation()) {
        return;
      }
    }
    block->set(BlockBegin::is_on_work_list_flag);
    _work_list->push(block);

    sort_top_into_worklist(_work_list, block);
  }
}

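// One insertion-sort step: after a push, the new top is moved down past blocks
// with smaller depth-first numbers so the list stays sorted in descending
// order; the next pop() then yields the unparsed block with the smallest
// depth-first number, i.e. the one that comes earliest in reverse post-order.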
void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
  assert(worklist->top() == top, "");
  // sort block descending into work list
  const int dfn = top->depth_first_number();
  assert(dfn != -1, "unknown depth first number");
  int i = worklist->length()-2;
  while (i >= 0) {
    BlockBegin* b = worklist->at(i);
    if (b->depth_first_number() < dfn) {
      worklist->at_put(i+1, b);
    } else {
      break;
    }
    i --;
  }
  if (i >= -1) worklist->at_put(i + 1, top);
}

int GraphBuilder::ScopeData::caller_stack_size() const {
  ValueStack* state = scope()->caller_state();
  if (state == NULL) {
    return 0;
  }
  return state->stack_size();
}


BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
  if (is_work_list_empty()) {
    return NULL;
  }
  return _work_list->pop();
}


bool GraphBuilder::ScopeData::is_work_list_empty() const {
  return (_work_list == NULL || _work_list->length() == 0);
}


void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
  assert(parsing_jsr(), "");
  // clone all the exception handlers from the scope
  XHandlers* handlers = new XHandlers(scope()->xhandlers());
  const int n = handlers->length();
  for (int i = 0; i < n; i++) {
    // The XHandlers need to be adjusted to dispatch to the cloned
    // handler block instead of the default one but the synthetic
    // unlocker needs to be handled specially. The synthetic unlocker
    // should be left alone since there can be only one and all code
    // should dispatch to the same one.
    XHandler* h = handlers->handler_at(i);
    if (h->handler_bci() != SynchronizationEntryBCI) {
      h->set_entry_block(block_at(h->handler_bci()));
    } else {
      assert(h->entry_block()->is_set(BlockBegin::default_exception_handler_flag),
             "should be the synthetic unlock block");
    }
  }
  _jsr_xhandlers = handlers;
}


int GraphBuilder::ScopeData::num_returns() {
  if (parsing_jsr()) {
    return parent()->num_returns();
  }
  return _num_returns;
}


void GraphBuilder::ScopeData::incr_num_returns() {
  if (parsing_jsr()) {
    parent()->incr_num_returns();
  } else {
    ++_num_returns;
  }
}


// Implementation of GraphBuilder

#define INLINE_BAILOUT(msg)  { inline_bailout(msg); return false; }


void GraphBuilder::load_constant() {
  ciConstant con = stream()->get_constant();
  if (con.basic_type() == T_ILLEGAL) {
    BAILOUT("could not resolve a constant");
  } else {
    ValueType* t = illegalType;
    ValueStack* patch_state = NULL;
    switch (con.basic_type()) {
      case T_BOOLEAN: t = new IntConstant   (con.as_boolean()); break;
      case T_BYTE   : t = new IntConstant   (con.as_byte   ()); break;
      case T_CHAR   : t = new IntConstant   (con.as_char   ()); break;
      case T_SHORT  : t = new IntConstant   (con.as_short  ()); break;
      case T_INT    : t = new IntConstant   (con.as_int    ()); break;
      case T_LONG   : t = new LongConstant  (con.as_long   ()); break;
      case T_FLOAT  : t = new FloatConstant (con.as_float  ()); break;
      case T_DOUBLE : t = new DoubleConstant(con.as_double ()); break;
      case T_ARRAY  : t = new ArrayConstant (con.as_object ()->as_array ()); break;
      case T_OBJECT :
       {
        ciObject* obj = con.as_object();
        if (obj->is_klass()) {
          ciKlass* klass = obj->as_klass();
          if (!klass->is_loaded() || PatchALot) {
            patch_state = state()->copy();
            t = new ObjectConstant(obj);
          } else {
            t = new InstanceConstant(klass->java_mirror());
          }
        } else {
          t = new InstanceConstant(obj->as_instance());
        }
        break;
       }
      default       : ShouldNotReachHere();
    }
    Value x;
    if (patch_state != NULL) {
      x = new Constant(t, patch_state);
    } else {
      x = new Constant(t);
    }
    push(t, append(x));
  }
}


void GraphBuilder::load_local(ValueType* type, int index) {
  Value x = state()->load_local(index);
  push(type, x);
}


void GraphBuilder::store_local(ValueType* type, int index) {
  Value x = pop(type);
  store_local(state(), x, type, index);
}


void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) {
  if (parsing_jsr()) {
    // We need to do additional tracking of the location of the return
    // address for jsrs since we don't handle arbitrary jsr/ret
    // constructs. Here we are figuring out in which circumstances we
    // need to bail out.
    if (x->type()->is_address()) {
      scope_data()->set_jsr_return_address_local(index);

      // Also check parent jsrs (if any) at this time to see whether
      // they are using this local. We don't handle skipping over a
      // ret.
      for (ScopeData* cur_scope_data = scope_data()->parent();
           cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
           cur_scope_data = cur_scope_data->parent()) {
        if (cur_scope_data->jsr_return_address_local() == index) {
          BAILOUT("subroutine overwrites return address from previous subroutine");
        }
      }
    } else if (index == scope_data()->jsr_return_address_local()) {
      scope_data()->set_jsr_return_address_local(-1);
    }
  }

  state->store_local(index, round_fp(x));
}


void GraphBuilder::load_indexed(BasicType type) {
  Value index = ipop();
  Value array = apop();
  Value length = NULL;
  if (CSEArrayLength ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
    length = append(new ArrayLength(array, lock_stack()));
  }
  push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, lock_stack())));
}


void GraphBuilder::store_indexed(BasicType type) {
  Value value = pop(as_ValueType(type));
  Value index = ipop();
  Value array = apop();
  Value length = NULL;
  if (CSEArrayLength ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
    length = append(new ArrayLength(array, lock_stack()));
  }
  StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack());
  append(result);
  _memory->store_value(value);
}

void GraphBuilder::stack_op(Bytecodes::Code code) {
  switch (code) {
    case Bytecodes::_pop:
      { state()->raw_pop();
      }
      break;
    case Bytecodes::_pop2:
      { state()->raw_pop();
        state()->raw_pop();
      }
      break;
    case Bytecodes::_dup:
      { Value w = state()->raw_pop();
        state()->raw_push(w);
        state()->raw_push(w);
      }
      break;
    case Bytecodes::_dup_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        Value w4 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w4);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_swap:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}


void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* stack) {
  Value y = pop(type);
  Value x = pop(type);
  // NOTE: strictfp can be queried from current method since we don't
  // inline methods with differing strictfp bits
  Value res = new ArithmeticOp(code, x, y, method()->is_strict(), stack);
  // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
  res = append(res);
  if (method()->is_strict()) {
    res = round_fp(res);
  }
  push(type, res);
}


void GraphBuilder::negate_op(ValueType* type) {
  push(type, append(new NegateOp(pop(type))));
}


void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
  Value s = ipop();
  Value x = pop(type);
  // try to simplify
  // Note: This code should go into the canonicalizer as soon as it can
  //       handle canonicalized forms that contain more than one node.
  if (CanonicalizeNodes && code == Bytecodes::_iushr) {
    // pattern: x >>> s
    IntConstant* s1 = s->type()->as_IntConstant();
    if (s1 != NULL) {
      // pattern: x >>> s1, with s1 constant
      ShiftOp* l = x->as_ShiftOp();
      if (l != NULL && l->op() == Bytecodes::_ishl) {
        // pattern: (a << b) >>> s1
        IntConstant* s0 = l->y()->type()->as_IntConstant();
        if (s0 != NULL) {
          // pattern: (a << s0) >>> s1
          const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
          const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
          if (s0c == s1c) {
            if (s0c == 0) {
              // pattern: (a << 0) >>> 0 => simplify to: a
              ipush(l->x());
            } else {
              // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
              assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
              const int m = (1 << (BitsPerInt - s0c)) - 1;
              Value s = append(new Constant(new IntConstant(m)));
              ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
            }
            return;
          }
        }
      }
    }
  }
  // could not simplify
  push(type, append(new ShiftOp(code, x, s)));
}


void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
  Value y = pop(type);
  Value x = pop(type);
  push(type, append(new LogicOp(code, x, y)));
}


void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
  ValueStack* state_before = state()->copy();
  Value y = pop(type);
  Value x = pop(type);
  ipush(append(new CompareOp(code, x, y, state_before)));
}


void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
  push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
}


void GraphBuilder::increment() {
  int index = stream()->get_index();
  int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
  load_local(intType, index);
  ipush(append(new Constant(new IntConstant(delta))));
  arithmetic_op(intType, Bytecodes::_iadd);
  store_local(intType, index);
}


void GraphBuilder::_goto(int from_bci, int to_bci) {
  profile_bci(from_bci);
  append(new Goto(block_at(to_bci), to_bci <= from_bci));
}


void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
  BlockBegin* tsux = block_at(stream()->get_dest());
  BlockBegin* fsux = block_at(stream()->next_bci());
  bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
  If* if_node = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb))->as_If();
  if (profile_branches() && (if_node != NULL)) {
    if_node->set_profiled_method(method());
    if_node->set_profiled_bci(bci());
    if_node->set_should_profile(true);
  }
}


void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(intZero));
  ValueStack* state_before = state()->copy();
  Value x = ipop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(objectNull));
  ValueStack* state_before = state()->copy();
  Value x = apop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
  ValueStack* state_before = state()->copy();
  Value y = pop(type);
  Value x = pop(type);
  if_node(x, cond, y, state_before);
}

void GraphBuilder::jsr(int dest) {
  // We only handle well-formed jsrs (those which are "block-structured").
  // If the bytecodes are strange (jumping out of a jsr block) then we
  // might end up trying to re-parse a block containing a jsr which
  // has already been activated. Watch for this case and bail out.
  for (ScopeData* cur_scope_data = scope_data();
       cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
       cur_scope_data = cur_scope_data->parent()) {
    if (cur_scope_data->jsr_entry_bci() == dest) {
      BAILOUT("too-complicated jsr/ret structure");
    }
  }

  push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  if (!try_inline_jsr(dest)) {
    return; // bailed out while parsing and inlining subroutine
  }
}


void GraphBuilder::ret(int local_index) {
  if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");

  if (local_index != scope_data()->jsr_return_address_local()) {
    BAILOUT("can not handle complicated jsr/ret constructs");
  }

  // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
  append(new Goto(scope_data()->jsr_continuation(), false));
}


void GraphBuilder::table_switch() {
  Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(method()->code() + bci());
  const int l = switch_->length();
  if (CanonicalizeNodes && l == 1) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it can
    //       handle canonicalized forms that contain more than one node.
    Value key = append(new Constant(new IntConstant(switch_->low_key())));
    BlockBegin* tsux = block_at(bci() + switch_->dest_offset_at(0));
    BlockBegin* fsux = block_at(bci() + switch_->default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    ValueStack* state_before = is_bb ? state() : NULL;
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors
    BlockList* sux = new BlockList(l + 1, NULL);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      sux->at_put(i, block_at(bci() + switch_->dest_offset_at(i)));
      if (switch_->dest_offset_at(i) < 0) has_bb = true;
    }
    // add default successor
    sux->at_put(i, block_at(bci() + switch_->default_offset()));
    ValueStack* state_before = has_bb ? state() : NULL;
    append(new TableSwitch(ipop(), sux, switch_->low_key(), state_before, has_bb));
  }
}


void GraphBuilder::lookup_switch() {
  Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(method()->code() + bci());
  const int l = switch_->number_of_pairs();
  if (CanonicalizeNodes && l == 1) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it can
    //       handle canonicalized forms that contain more than one node.
    // simplify to If
    LookupswitchPair* pair = switch_->pair_at(0);
    Value key = append(new Constant(new IntConstant(pair->match())));
    BlockBegin* tsux = block_at(bci() + pair->offset());
    BlockBegin* fsux = block_at(bci() + switch_->default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    ValueStack* state_before = is_bb ? state() : NULL;
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors & keys
    BlockList* sux = new BlockList(l + 1, NULL);
    intArray* keys = new intArray(l, 0);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      LookupswitchPair* pair = switch_->pair_at(i);
      if (pair->offset() < 0) has_bb = true;
      sux->at_put(i, block_at(bci() + pair->offset()));
      keys->at_put(i, pair->match());
    }
    // add default successor
    sux->at_put(i, block_at(bci() + switch_->default_offset()));
    ValueStack* state_before = has_bb ? state() : NULL;
    append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
  }
}

void GraphBuilder::call_register_finalizer() {
  // If the receiver requires finalization then emit code to perform
  // the registration on return.

  // Gather some type information about the receiver
  Value receiver = state()->load_local(0);
  assert(receiver != NULL, "must have a receiver");
  ciType* declared_type = receiver->declared_type();
  ciType* exact_type = receiver->exact_type();
  if (exact_type == NULL &&
      receiver->as_Local() &&
      receiver->as_Local()->java_index() == 0) {
    ciInstanceKlass* ik = compilation()->method()->holder();
    if (ik->is_final()) {
      exact_type = ik;
    } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
      // test class is leaf class
      compilation()->dependency_recorder()->assert_leaf_type(ik);
      exact_type = ik;
    } else {
      declared_type = ik;
    }
  }

  // see if we know statically that registration isn't required
  bool needs_check = true;
  if (exact_type != NULL) {
    needs_check = exact_type->as_instance_klass()->has_finalizer();
  } else if (declared_type != NULL) {
    ciInstanceKlass* ik = declared_type->as_instance_klass();
    if (!Dependencies::has_finalizable_subclass(ik)) {
      compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
      needs_check = false;
    }
  }

  if (needs_check) {
    // Perform the registration of finalizable objects.
    load_local(objectType, 0);
    append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
                               state()->pop_arguments(1),
                               true, lock_stack(), true));
  }
}

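// Emits the epilogue for a return bytecode. When inlining (continuation() is
// set) the return becomes a Goto to the continuation block, preceded by a
// monitorexit if the inlinee is synchronized; otherwise a MonitorExit (for
// synchronized methods) and a Return instruction are appended.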
void GraphBuilder::method_return(Value x) {
  if (RegisterFinalizersAtInit &&
      method()->intrinsic_id() == vmIntrinsics::_Object_init) {
    call_register_finalizer();
  }

  // Check to see whether we are inlining. If so, Return
  // instructions become Gotos to the continuation point.
  if (continuation() != NULL) {
    assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");

    // If the inlined method is synchronized, the monitor must be
    // released before we jump to the continuation block.
    if (method()->is_synchronized()) {
      int i = state()->caller_state()->locks_size();
      assert(state()->locks_size() == i + 1, "receiver must be locked here");
      monitorexit(state()->lock_at(i), SynchronizationEntryBCI);
    }

    state()->truncate_stack(caller_stack_size());
    if (x != NULL) {
      state()->push(x->type(), x);
    }
    Goto* goto_callee = new Goto(continuation(), false);

    // See whether this is the first return; if so, store off some
    // of the state for later examination
    if (num_returns() == 0) {
      set_inline_cleanup_info(_block, _last, state());
    }

    // State at end of inlined method is the state of the caller
    // without the method parameters on stack, including the
    // return value, if any, of the inlined method on operand stack.
    set_state(scope_data()->continuation_state()->copy());
    if (x) {
      state()->push(x->type(), x);
    }

    // The current bci() is in the wrong scope, so use the bci() of
    // the continuation point.
    append_with_bci(goto_callee, scope_data()->continuation()->bci());
    incr_num_returns();

    return;
  }

  state()->truncate_stack(0);
  if (method()->is_synchronized()) {
    // perform the unlocking before exiting the method
    Value receiver;
    if (!method()->is_static()) {
      receiver = _initial_state->local_at(0);
    } else {
      receiver = append(new Constant(new ClassConstant(method()->holder())));
    }
    append_split(new MonitorExit(receiver, state()->unlock()));
  }

  append(new Return(x));
}

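// Handles getstatic/putstatic/getfield/putfield. Accesses to loaded fields
// are routed through the MemoryBuffer so redundant loads and default-value
// stores can be eliminated; when the holder class is not yet initialized (or
// PatchALot is set) a copy of the state is kept for patching/deoptimization.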
void GraphBuilder::access_field(Bytecodes::Code code) {
|
|
1402 |
bool will_link;
|
|
1403 |
ciField* field = stream()->get_field(will_link);
|
|
1404 |
ciInstanceKlass* holder = field->holder();
|
|
1405 |
BasicType field_type = field->type()->basic_type();
|
|
1406 |
ValueType* type = as_ValueType(field_type);
|
|
1407 |
// call will_link again to determine if the field is valid.
|
|
1408 |
const bool is_loaded = holder->is_loaded() &&
|
|
1409 |
field->will_link(method()->holder(), code);
|
|
1410 |
const bool is_initialized = is_loaded && holder->is_initialized();
|
|
1411 |
|
|
1412 |
ValueStack* state_copy = NULL;
|
|
1413 |
if (!is_initialized || PatchALot) {
|
|
1414 |
// save state before instruction for debug info when
|
|
1415 |
// deoptimization happens during patching
|
|
1416 |
state_copy = state()->copy();
|
|
1417 |
}
|
|
1418 |
|
|
1419 |
Value obj = NULL;
|
|
1420 |
if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
|
|
1421 |
// commoning of class constants should only occur if the class is
|
|
1422 |
// fully initialized and resolved in this constant pool. The will_link test
|
|
1423 |
// above essentially checks if this class is resolved in this constant pool
|
|
1424 |
// so the is_initialized flag should be sufficient.
|
|
1425 |
if (state_copy != NULL) {
|
|
1426 |
// build a patching constant
|
|
1427 |
obj = new Constant(new ClassConstant(holder), state_copy);
|
|
1428 |
} else {
|
|
1429 |
obj = new Constant(new ClassConstant(holder));
|
|
1430 |
}
|
|
1431 |
}
|
|
1432 |
|
|
1433 |
|
|
1434 |
const int offset = is_loaded ? field->offset() : -1;
|
|
1435 |
switch (code) {
|
|
1436 |
case Bytecodes::_getstatic: {
|
|
1437 |
// check for compile-time constants, i.e., initialized static final fields
|
|
1438 |
Instruction* constant = NULL;
|
|
1439 |
if (field->is_constant() && !PatchALot) {
|
|
1440 |
ciConstant field_val = field->constant_value();
|
|
1441 |
BasicType field_type = field_val.basic_type();
|
|
1442 |
switch (field_type) {
|
|
1443 |
case T_ARRAY:
|
|
1444 |
case T_OBJECT:
|
|
1445 |
if (field_val.as_object()->has_encoding()) {
|
|
1446 |
constant = new Constant(as_ValueType(field_val));
|
|
1447 |
}
|
|
1448 |
break;
|
|
1449 |
|
|
1450 |
default:
|
|
1451 |
constant = new Constant(as_ValueType(field_val));
|
|
1452 |
}
|
|
1453 |
}
|
|
1454 |
if (constant != NULL) {
|
|
1455 |
push(type, append(constant));
|
|
1456 |
state_copy = NULL; // Not a potential deoptimization point (see set_state_before logic below)
|
|
1457 |
} else {
|
|
1458 |
push(type, append(new LoadField(append(obj), offset, field, true,
|
|
1459 |
lock_stack(), state_copy, is_loaded, is_initialized)));
|
|
1460 |
}
|
|
1461 |
break;
|
|
1462 |
}
|
|
1463 |
case Bytecodes::_putstatic:
|
|
1464 |
{ Value val = pop(type);
|
|
1465 |
append(new StoreField(append(obj), offset, field, val, true, lock_stack(), state_copy, is_loaded, is_initialized));
|
|
1466 |
}
|
|
1467 |
break;
|
|
1468 |
case Bytecodes::_getfield :
|
|
1469 |
{
|
|
1470 |
LoadField* load = new LoadField(apop(), offset, field, false, lock_stack(), state_copy, is_loaded, true);
|
|
1471 |
Value replacement = is_loaded ? _memory->load(load) : load;
|
|
1472 |
if (replacement != load) {
|
|
1473 |
assert(replacement->bci() != -99 || replacement->as_Phi() || replacement->as_Local(),
|
|
1474 |
"should already by linked");
|
|
1475 |
push(type, replacement);
|
|
1476 |
} else {
|
|
1477 |
push(type, append(load));
|
|
1478 |
}
|
|
1479 |
break;
|
|
1480 |
}
|
|
1481 |
|
|
1482 |
case Bytecodes::_putfield :
|
|
1483 |
{ Value val = pop(type);
|
|
1484 |
StoreField* store = new StoreField(apop(), offset, field, val, false, lock_stack(), state_copy, is_loaded, true);
|
|
1485 |
if (is_loaded) store = _memory->store(store);
|
|
1486 |
if (store != NULL) {
|
|
1487 |
append(store);
|
|
1488 |
}
|
|
1489 |
}
|
|
1490 |
break;
|
|
1491 |
default :
|
|
1492 |
ShouldNotReachHere();
|
|
1493 |
break;
|
|
1494 |
}
|
|
1495 |
}
|
|
1496 |
|
|
1497 |
|
|
1498 |
Dependencies* GraphBuilder::dependency_recorder() const {
|
|
1499 |
assert(DeoptC1, "need debug information");
|
|
1500 |
compilation()->set_needs_debug_information(true);
|
|
1501 |
return compilation()->dependency_recorder();
|
|
1502 |
}
|
|
1503 |
|
|
1504 |
|
|
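// Build HIR for the invoke bytecodes. The call is devirtualized where
// possible (statically bindable targets, exact receiver types, CHA) and
// inlining is attempted; if that fails, a regular Invoke is appended with a
// receiver null check where required and optional call profiling.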
1505 |
void GraphBuilder::invoke(Bytecodes::Code code) {
|
|
1506 |
bool will_link;
|
|
1507 |
ciMethod* target = stream()->get_method(will_link);
|
|
1508 |
// we have to make sure the argument size (incl. the receiver)
|
|
1509 |
// is correct for compilation (the call would fail later during
|
|
1510 |
// linkage anyway) - was bug (gri 7/28/99)
|
|
1511 |
if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error");
|
|
1512 |
ciInstanceKlass* klass = target->holder();
|
|
1513 |
|
|
1514 |
// check if CHA possible: if so, change the code to invoke_special
|
|
1515 |
ciInstanceKlass* calling_klass = method()->holder();
|
|
1516 |
ciKlass* holder = stream()->get_declared_method_holder();
|
|
1517 |
ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
|
|
1518 |
ciInstanceKlass* actual_recv = callee_holder;
|
|
1519 |
|
|
1520 |
// some methods are obviously bindable without any type checks so
|
|
1521 |
// convert them directly to an invokespecial.
|
|
1522 |
if (target->is_loaded() && !target->is_abstract() &&
|
|
1523 |
target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) {
|
|
1524 |
code = Bytecodes::_invokespecial;
|
|
1525 |
}
|
|
1526 |
|
|
1527 |
// NEEDS_CLEANUP
|
|
1528 |
// I've added the target->is_loaded() test below but I don't really understand
|
|
1529 |
// how klass->is_loaded() can be true and yet target->is_loaded() is false.
|
|
1530 |
// this happened while running the JCK invokevirtual tests under doit. TKR
|
|
1531 |
ciMethod* cha_monomorphic_target = NULL;
|
|
1532 |
ciMethod* exact_target = NULL;
|
|
1533 |
if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded()) {
|
|
1534 |
Value receiver = NULL;
|
|
1535 |
ciInstanceKlass* receiver_klass = NULL;
|
|
1536 |
bool type_is_exact = false;
|
|
1537 |
// try to find a precise receiver type
|
|
1538 |
if (will_link && !target->is_static()) {
|
|
1539 |
int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
|
|
1540 |
receiver = state()->stack_at(index);
|
|
1541 |
ciType* type = receiver->exact_type();
|
|
1542 |
if (type != NULL && type->is_loaded() &&
|
|
1543 |
type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
|
|
1544 |
receiver_klass = (ciInstanceKlass*) type;
|
|
1545 |
type_is_exact = true;
|
|
1546 |
}
|
|
1547 |
if (type == NULL) {
|
|
1548 |
type = receiver->declared_type();
|
|
1549 |
if (type != NULL && type->is_loaded() &&
|
|
1550 |
type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
|
|
1551 |
receiver_klass = (ciInstanceKlass*) type;
|
|
1552 |
if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) {
|
|
1553 |
// Insert a dependency on this type since
|
|
1554 |
// find_monomorphic_target may assume it's already done.
|
|
1555 |
dependency_recorder()->assert_leaf_type(receiver_klass);
|
|
1556 |
type_is_exact = true;
|
|
1557 |
}
|
|
1558 |
}
|
|
1559 |
}
|
|
1560 |
}
|
|
1561 |
if (receiver_klass != NULL && type_is_exact &&
|
|
1562 |
receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) {
|
|
1563 |
// If we have the exact receiver type we can bind directly to
|
|
1564 |
// the method to call.
|
|
1565 |
exact_target = target->resolve_invoke(calling_klass, receiver_klass);
|
|
1566 |
if (exact_target != NULL) {
|
|
1567 |
target = exact_target;
|
|
1568 |
code = Bytecodes::_invokespecial;
|
|
1569 |
}
|
|
1570 |
}
|
|
1571 |
if (receiver_klass != NULL &&
|
|
1572 |
receiver_klass->is_subtype_of(actual_recv) &&
|
|
1573 |
actual_recv->is_initialized()) {
|
|
1574 |
actual_recv = receiver_klass;
|
|
1575 |
}
|
|
1576 |
|
|
1577 |
if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
|
|
1578 |
(code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
|
|
1579 |
// Use CHA on the receiver to select a more precise method.
|
|
1580 |
cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
|
|
1581 |
} else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != NULL) {
|
|
1582 |
// if there is only one implementor of this interface then we
|
|
1583 |
// may be able to bind this invoke directly to the implementing
|
|
1584 |
// klass but we need both a dependence on the single interface
|
|
1585 |
// and on the method we bind to. Additionally since all we know
|
|
1586 |
// about the receiver type is that it's supposed to implement the
|
|
1587 |
// interface we have to insert a check that it's the class we
|
|
1588 |
// expect. Interface types are not checked by the verifier so
|
|
1589 |
// they are roughly equivalent to Object.
|
|
1590 |
ciInstanceKlass* singleton = NULL;
|
|
1591 |
if (target->holder()->nof_implementors() == 1) {
|
|
1592 |
singleton = target->holder()->implementor(0);
|
|
1593 |
}
|
|
1594 |
if (singleton) {
|
|
1595 |
cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
|
|
1596 |
if (cha_monomorphic_target != NULL) {
|
|
1597 |
// If CHA is able to bind this invoke then update the class
|
|
1598 |
// to match that class, otherwise klass will refer to the
|
|
1599 |
// interface.
|
|
1600 |
klass = cha_monomorphic_target->holder();
|
|
1601 |
actual_recv = target->holder();
|
|
1602 |
|
|
1603 |
// insert a check that it's really the expected class.
|
|
1604 |
CheckCast* c = new CheckCast(klass, receiver, NULL);
|
|
1605 |
c->set_incompatible_class_change_check();
|
|
1606 |
c->set_direct_compare(klass->is_final());
|
|
1607 |
append_split(c);
|
|
1608 |
}
|
|
1609 |
}
|
|
1610 |
}
|
|
1611 |
}
|
|
1612 |
|
|
1613 |
if (cha_monomorphic_target != NULL) {
|
|
1614 |
if (cha_monomorphic_target->is_abstract()) {
|
|
1615 |
// Do not optimize for abstract methods
|
|
1616 |
cha_monomorphic_target = NULL;
|
|
1617 |
}
|
|
1618 |
}
|
|
1619 |
|
|
1620 |
if (cha_monomorphic_target != NULL) {
|
|
1621 |
if (!(target->is_final_method())) {
|
|
1622 |
// If we inlined because CHA revealed only a single target method,
|
|
1623 |
// then we are dependent on that target method not getting overridden
|
|
1624 |
// by dynamic class loading. Be sure to test the "static" receiver
|
|
1625 |
// dest_method here, as opposed to the actual receiver, which may
|
|
1626 |
// falsely lead us to believe that the receiver is final or private.
|
|
1627 |
dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target);
|
|
1628 |
}
|
|
1629 |
code = Bytecodes::_invokespecial;
|
|
1630 |
}
|
|
1631 |
// check if we could do inlining
|
|
1632 |
if (!PatchALot && Inline && klass->is_loaded() &&
|
|
1633 |
(klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
|
|
1634 |
&& target->will_link(klass, callee_holder, code)) {
|
|
1635 |
// callee is known => check if we have static binding
|
|
1636 |
assert(target->is_loaded(), "callee must be known");
|
|
1637 |
if (code == Bytecodes::_invokestatic
|
|
1638 |
|| code == Bytecodes::_invokespecial
|
|
1639 |
|| code == Bytecodes::_invokevirtual && target->is_final_method()
|
|
1640 |
) {
|
|
1641 |
// static binding => check if callee is ok
|
|
1642 |
ciMethod* inline_target = (cha_monomorphic_target != NULL)
|
|
1643 |
? cha_monomorphic_target
|
|
1644 |
: target;
|
|
1645 |
bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
|
|
1646 |
CHECK_BAILOUT();
|
|
1647 |
|
|
1648 |
#ifndef PRODUCT
|
|
1649 |
// printing
|
|
1650 |
if (PrintInlining && !res) {
|
|
1651 |
// if it was successfully inlined, then it was already printed.
|
|
1652 |
print_inline_result(inline_target, res);
|
|
1653 |
}
|
|
1654 |
#endif
|
|
1655 |
clear_inline_bailout();
|
|
1656 |
if (res) {
|
|
1657 |
// Register dependence if JVMTI has either breakpoint
|
|
1658 |
// setting or hotswapping of methods capabilities since they may
|
|
1659 |
// cause deoptimization.
|
|
1660 |
if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
|
|
1661 |
dependency_recorder()->assert_evol_method(inline_target);
|
|
1662 |
}
|
|
1663 |
return;
|
|
1664 |
}
|
|
1665 |
}
|
|
1666 |
}
|
|
1667 |
// If we attempted an inline which did not succeed because of a
|
|
1668 |
// bailout during construction of the callee graph, the entire
|
|
1669 |
// compilation has to be aborted. This is fairly rare and currently
|
|
1670 |
// seems to only occur for jasm-generated classes which contain
|
|
1671 |
// jsr/ret pairs which are not associated with finally clauses and
|
|
1672 |
// do not have exception handlers in the containing method, and are
|
|
1673 |
// therefore not caught early enough to abort the inlining without
|
|
1674 |
// corrupting the graph. (We currently bail out with a non-empty
|
|
1675 |
// stack at a ret in these situations.)
|
|
1676 |
CHECK_BAILOUT();
|
|
1677 |
|
|
1678 |
// inlining not successful => standard invoke
|
|
1679 |
bool is_static = code == Bytecodes::_invokestatic;
|
|
1680 |
ValueType* result_type = as_ValueType(target->return_type());
|
|
1681 |
Values* args = state()->pop_arguments(target->arg_size_no_receiver());
|
|
1682 |
Value recv = is_static ? NULL : apop();
|
|
1683 |
bool is_loaded = target->is_loaded();
|
|
1684 |
int vtable_index = methodOopDesc::invalid_vtable_index;
|
|
1685 |
|
|
1686 |
#ifdef SPARC
|
|
1687 |
// Currently only supported on Sparc.
|
|
1688 |
// The UseInlineCaches only controls dispatch to invokevirtuals for
|
|
1689 |
// loaded classes which we weren't able to statically bind.
|
|
1690 |
if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
|
|
1691 |
&& !target->can_be_statically_bound()) {
|
|
1692 |
// Find a vtable index if one is available
|
|
1693 |
vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
|
|
1694 |
}
|
|
1695 |
#endif
|
|
1696 |
|
|
1697 |
if (recv != NULL &&
|
|
1698 |
(code == Bytecodes::_invokespecial ||
|
|
1699 |
!is_loaded || target->is_final() ||
|
|
1700 |
profile_calls())) {
|
|
1701 |
// invokespecial always needs a NULL check. invokevirtual where
|
|
1702 |
// the target is final or where it's not known whether the
|
|
1703 |
// target is final requires a NULL check. Otherwise normal
|
|
1704 |
// invokevirtual will perform the null check during the lookup
|
|
1705 |
// logic or the unverified entry point. Profiling of calls
|
|
1706 |
// requires that the null check is performed in all cases.
|
|
1707 |
null_check(recv);
|
|
1708 |
}
|
|
1709 |
|
|
1710 |
if (profile_calls()) {
|
|
1711 |
assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
|
|
1712 |
ciKlass* target_klass = NULL;
|
|
1713 |
if (cha_monomorphic_target != NULL) {
|
|
1714 |
target_klass = cha_monomorphic_target->holder();
|
|
1715 |
} else if (exact_target != NULL) {
|
|
1716 |
target_klass = exact_target->holder();
|
|
1717 |
}
|
|
1718 |
profile_call(recv, target_klass);
|
|
1719 |
}
|
|
1720 |
|
|
1721 |
Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target);
|
|
1722 |
// push result
|
|
1723 |
append_split(result);
|
|
1724 |
|
|
1725 |
if (result_type != voidType) {
|
|
1726 |
if (method()->is_strict()) {
|
|
1727 |
push(result_type, round_fp(result));
|
|
1728 |
} else {
|
|
1729 |
push(result_type, result);
|
|
1730 |
}
|
|
1731 |
}
|
|
1732 |
}
|
|
1733 |
|
|
1734 |
|
|
1735 |
void GraphBuilder::new_instance(int klass_index) {
|
|
1736 |
bool will_link;
|
|
1737 |
ciKlass* klass = stream()->get_klass(will_link);
|
|
1738 |
assert(klass->is_instance_klass(), "must be an instance klass");
|
|
1739 |
NewInstance* new_instance = new NewInstance(klass->as_instance_klass());
|
|
1740 |
_memory->new_instance(new_instance);
|
|
1741 |
apush(append_split(new_instance));
|
|
1742 |
}
|
|
1743 |
|
|
1744 |
|
|
1745 |
void GraphBuilder::new_type_array() {
|
|
1746 |
apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index())));
|
|
1747 |
}
|
|
1748 |
|
|
1749 |
|
|
1750 |
void GraphBuilder::new_object_array() {
|
|
1751 |
bool will_link;
|
|
1752 |
ciKlass* klass = stream()->get_klass(will_link);
|
|
1753 |
ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
|
|
1754 |
NewArray* n = new NewObjectArray(klass, ipop(), state_before);
|
|
1755 |
apush(append_split(n));
|
|
1756 |
}
|
|
1757 |
|
|
1758 |
|
|
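// Returns true if a type check against k can be implemented as a single
// pointer compare: k is a loaded final class, or (with CHA available) a
// leaf class for which a leaf-type dependency is recorded.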
1759 |
bool GraphBuilder::direct_compare(ciKlass* k) {
|
|
1760 |
if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
|
|
1761 |
ciInstanceKlass* ik = k->as_instance_klass();
|
|
1762 |
if (ik->is_final()) {
|
|
1763 |
return true;
|
|
1764 |
} else {
|
|
1765 |
if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
|
|
1766 |
// test class is leaf class
|
|
1767 |
dependency_recorder()->assert_leaf_type(ik);
|
|
1768 |
return true;
|
|
1769 |
}
|
|
1770 |
}
|
|
1771 |
}
|
|
1772 |
return false;
|
|
1773 |
}
|
|
1774 |
|
|
1775 |
|
|
1776 |
void GraphBuilder::check_cast(int klass_index) {
|
|
1777 |
bool will_link;
|
|
1778 |
ciKlass* klass = stream()->get_klass(will_link);
|
|
1779 |
ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
|
|
1780 |
CheckCast* c = new CheckCast(klass, apop(), state_before);
|
|
1781 |
apush(append_split(c));
|
|
1782 |
c->set_direct_compare(direct_compare(klass));
|
|
1783 |
if (profile_checkcasts()) {
|
|
1784 |
c->set_profiled_method(method());
|
|
1785 |
c->set_profiled_bci(bci());
|
|
1786 |
c->set_should_profile(true);
|
|
1787 |
}
|
|
1788 |
}
|
|
1789 |
|
|
1790 |
|
|
1791 |
void GraphBuilder::instance_of(int klass_index) {
|
|
1792 |
bool will_link;
|
|
1793 |
ciKlass* klass = stream()->get_klass(will_link);
|
|
1794 |
ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
|
|
1795 |
InstanceOf* i = new InstanceOf(klass, apop(), state_before);
|
|
1796 |
ipush(append_split(i));
|
|
1797 |
i->set_direct_compare(direct_compare(klass));
|
|
1798 |
}
|
|
1799 |
|
|
1800 |
|
|
1801 |
void GraphBuilder::monitorenter(Value x, int bci) {
|
|
1802 |
// save state before locking in case of deoptimization after a NullPointerException
|
|
1803 |
ValueStack* lock_stack_before = lock_stack();
|
|
1804 |
append_with_bci(new MonitorEnter(x, state()->lock(scope(), x), lock_stack_before), bci);
|
|
1805 |
kill_all();
|
|
1806 |
}
|
|
1807 |
|
|
1808 |
|
|
1809 |
void GraphBuilder::monitorexit(Value x, int bci) {
|
|
1810 |
// Note: the comment below is only relevant for the case where we do
|
|
1811 |
// not deoptimize due to asynchronous exceptions (!(DeoptC1 &&
|
|
1812 |
// DeoptOnAsyncException), which is not used anymore)
|
|
1813 |
|
|
1814 |
// Note: Potentially, the monitor state in an exception handler
|
|
1815 |
// can be wrong due to wrong 'initialization' of the handler
|
|
1816 |
// via a wrong asynchronous exception path. This can happen,
|
|
1817 |
// if the exception handler range for asynchronous exceptions
|
|
1818 |
// is too long (see also java bug 4327029, and comment in
|
|
1819 |
// GraphBuilder::handle_exception()). This may cause 'under-
|
|
1820 |
// flow' of the monitor stack => bailout instead.
|
|
1821 |
if (state()->locks_size() < 1) BAILOUT("monitor stack underflow");
|
|
1822 |
append_with_bci(new MonitorExit(x, state()->unlock()), bci);
|
|
1823 |
kill_all();
|
|
1824 |
}
|
|
1825 |
|
|
1826 |
|
|
1827 |
void GraphBuilder::new_multi_array(int dimensions) {
|
|
1828 |
bool will_link;
|
|
1829 |
ciKlass* klass = stream()->get_klass(will_link);
|
|
1830 |
ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL;
|
|
1831 |
|
|
1832 |
Values* dims = new Values(dimensions, NULL);
|
|
1833 |
// fill in all dimensions
|
|
1834 |
int i = dimensions;
|
|
1835 |
while (i-- > 0) dims->at_put(i, ipop());
|
|
1836 |
// create array
|
|
1837 |
NewArray* n = new NewMultiArray(klass, dims, state_before);
|
|
1838 |
apush(append_split(n));
|
|
1839 |
}
|
|
1840 |
|
|
1841 |
|
|
1842 |
void GraphBuilder::throw_op(int bci) {
|
|
1843 |
// We require that the debug info for a Throw be the "state before"
|
|
1844 |
// the Throw (i.e., exception oop is still on TOS)
|
|
1845 |
ValueStack* state_before = state()->copy();
|
|
1846 |
Throw* t = new Throw(apop(), state_before);
|
|
1847 |
append_with_bci(t, bci);
|
|
1848 |
}
|
|
1849 |
|
|
1850 |
|
|
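// With x87 floating point (RoundFPResults and no SSE2), double-valued
// results are wrapped in a RoundFP node so they are rounded to 64-bit
// precision; constants, locals and already-rounded values are left alone.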
1851 |
Value GraphBuilder::round_fp(Value fp_value) {
|
|
1852 |
// no rounding needed if SSE2 is used
|
|
1853 |
if (RoundFPResults && UseSSE < 2) {
|
|
1854 |
// Must currently insert rounding node for doubleword values that
|
|
1855 |
// are results of expressions (i.e., not loads from memory or
|
|
1856 |
// constants)
|
|
1857 |
if (fp_value->type()->tag() == doubleTag &&
|
|
1858 |
fp_value->as_Constant() == NULL &&
|
|
1859 |
fp_value->as_Local() == NULL && // method parameters need no rounding
|
|
1860 |
fp_value->as_RoundFP() == NULL) {
|
|
1861 |
return append(new RoundFP(fp_value));
|
|
1862 |
}
|
|
1863 |
}
|
|
1864 |
return fp_value;
|
|
1865 |
}
|
|
1866 |
|
|
1867 |
|
|
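// Canonicalize the instruction and, unless local value numbering finds an
// equivalent one, link it into the current block, record the state for
// StateSplits, and attach exception handlers if it can trap. Triggers a
// bailout when the instruction count limit is exceeded.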
1868 |
Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
|
|
1869 |
Canonicalizer canon(instr, bci);
|
|
1870 |
Instruction* i1 = canon.canonical();
|
|
1871 |
if (i1->bci() != -99) {
|
|
1872 |
// Canonicalizer returned an instruction which was already
|
|
1873 |
// appended so simply return it.
|
|
1874 |
return i1;
|
|
1875 |
} else if (UseLocalValueNumbering) {
|
|
1876 |
// Lookup the instruction in the ValueMap and add it to the map if
|
|
1877 |
// it's not found.
|
|
1878 |
Instruction* i2 = vmap()->find_insert(i1);
|
|
1879 |
if (i2 != i1) {
|
|
1880 |
// found an entry in the value map, so just return it.
|
|
1881 |
assert(i2->bci() != -1, "should already be linked");
|
|
1882 |
return i2;
|
|
1883 |
}
|
|
1884 |
ValueNumberingEffects vne(vmap());
|
|
1885 |
i1->visit(&vne);
|
|
1886 |
}
|
|
1887 |
|
|
1888 |
if (i1->as_Phi() == NULL && i1->as_Local() == NULL) {
|
|
1889 |
// i1 was not eliminated => append it
|
|
1890 |
assert(i1->next() == NULL, "shouldn't already be linked");
|
|
1891 |
_last = _last->set_next(i1, canon.bci());
|
|
1892 |
if (++_instruction_count >= InstructionCountCutoff
|
|
1893 |
&& !bailed_out()) {
|
|
1894 |
// set the bailout state but complete normal processing. We
|
|
1895 |
// might do a little more work before noticing the bailout so we
|
|
1896 |
// want processing to continue normally until it's noticed.
|
|
1897 |
bailout("Method and/or inlining is too large");
|
|
1898 |
}
|
|
1899 |
|
|
1900 |
#ifndef PRODUCT
|
|
1901 |
if (PrintIRDuringConstruction) {
|
|
1902 |
InstructionPrinter ip;
|
|
1903 |
ip.print_line(i1);
|
|
1904 |
if (Verbose) {
|
|
1905 |
state()->print();
|
|
1906 |
}
|
|
1907 |
}
|
|
1908 |
#endif
|
|
1909 |
assert(_last == i1, "adjust code below");
|
|
1910 |
StateSplit* s = i1->as_StateSplit();
|
|
1911 |
if (s != NULL && i1->as_BlockEnd() == NULL) {
|
|
1912 |
if (EliminateFieldAccess) {
|
|
1913 |
Intrinsic* intrinsic = s->as_Intrinsic();
|
|
1914 |
if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
|
|
1915 |
_memory->kill();
|
|
1916 |
}
|
|
1917 |
}
|
|
1918 |
s->set_state(state()->copy());
|
|
1919 |
}
|
|
1920 |
// set up exception handlers for this instruction if necessary
|
|
1921 |
if (i1->can_trap()) {
|
|
1922 |
assert(exception_state() != NULL || !has_handler(), "must have setup exception state");
|
|
1923 |
i1->set_exception_handlers(handle_exception(bci));
|
|
1924 |
}
|
|
1925 |
}
|
|
1926 |
return i1;
|
|
1927 |
}
|
|
1928 |
|
|
1929 |
|
|
1930 |
Instruction* GraphBuilder::append(Instruction* instr) {
|
|
1931 |
assert(instr->as_StateSplit() == NULL || instr->as_BlockEnd() != NULL, "wrong append used");
|
|
1932 |
return append_with_bci(instr, bci());
|
|
1933 |
}
|
|
1934 |
|
|
1935 |
|
|
1936 |
Instruction* GraphBuilder::append_split(StateSplit* instr) {
|
|
1937 |
return append_with_bci(instr, bci());
|
|
1938 |
}
|
|
1939 |
|
|
1940 |
|
|
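// Append an explicit NullCheck unless the value is provably non-null,
// i.e. a freshly allocated object/array or a loaded non-null constant.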
1941 |
void GraphBuilder::null_check(Value value) {
|
|
1942 |
if (value->as_NewArray() != NULL || value->as_NewInstance() != NULL) {
|
|
1943 |
return;
|
|
1944 |
} else {
|
|
1945 |
Constant* con = value->as_Constant();
|
|
1946 |
if (con) {
|
|
1947 |
ObjectType* c = con->type()->as_ObjectType();
|
|
1948 |
if (c && c->is_loaded()) {
|
|
1949 |
ObjectConstant* oc = c->as_ObjectConstant();
|
|
1950 |
if (!oc || !oc->value()->is_null_object()) {
|
|
1951 |
return;
|
|
1952 |
}
|
|
1953 |
}
|
|
1954 |
}
|
|
1955 |
}
|
|
1956 |
append(new NullCheck(value, lock_stack()));
|
|
1957 |
}
|
|
1958 |
|
|
1959 |
|
|
1960 |
|
|
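// Collect all exception handlers covering cur_bci, walking outward through
// the inlined scopes. The current exception state (with a truncated
// expression stack) is merged into each handler's entry block and the entry
// is queued for lazy parsing; the walk stops at a catch-all handler.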
1961 |
XHandlers* GraphBuilder::handle_exception(int cur_bci) {
|
|
1962 |
// fast path if it is guaranteed that no exception handlers are present
|
|
1963 |
if (!has_handler()) {
|
|
1964 |
// TODO: check if return NULL is possible (avoids empty lists)
|
|
1965 |
return new XHandlers();
|
|
1966 |
}
|
|
1967 |
|
|
1968 |
XHandlers* exception_handlers = new XHandlers();
|
|
1969 |
ScopeData* cur_scope_data = scope_data();
|
|
1970 |
ValueStack* s = exception_state();
|
|
1971 |
int scope_count = 0;
|
|
1972 |
|
|
1973 |
assert(s != NULL, "exception state must be set");
|
|
1974 |
do {
|
|
1975 |
assert(cur_scope_data->scope() == s->scope(), "scopes do not match");
|
|
1976 |
assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
|
|
1977 |
|
|
1978 |
// join with all potential exception handlers
|
|
1979 |
XHandlers* list = cur_scope_data->xhandlers();
|
|
1980 |
const int n = list->length();
|
|
1981 |
for (int i = 0; i < n; i++) {
|
|
1982 |
XHandler* h = list->handler_at(i);
|
|
1983 |
if (h->covers(cur_bci)) {
|
|
1984 |
// h is a potential exception handler => join it
|
|
1985 |
compilation()->set_has_exception_handlers(true);
|
|
1986 |
|
|
1987 |
BlockBegin* entry = h->entry_block();
|
|
1988 |
if (entry == block()) {
|
|
1989 |
// It's acceptable for an exception handler to cover itself
|
|
1990 |
// but we don't handle that in the parser currently. It's
|
|
1991 |
// very rare so we bailout instead of trying to handle it.
|
|
1992 |
BAILOUT_("exception handler covers itself", exception_handlers);
|
|
1993 |
}
|
|
1994 |
assert(entry->bci() == h->handler_bci(), "must match");
|
|
1995 |
assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
|
|
1996 |
|
|
1997 |
// previously this was a BAILOUT, but this is not necessary
|
|
1998 |
// now because asynchronous exceptions are not handled this way.
|
|
1999 |
assert(entry->state() == NULL || s->locks_size() == entry->state()->locks_size(), "locks do not match");
|
|
2000 |
|
|
2001 |
// xhandlers start with an empty expression stack
|
|
2002 |
s->truncate_stack(cur_scope_data->caller_stack_size());
|
|
2003 |
|
|
2004 |
// Note: Usually this join must work. However, very
|
|
2005 |
// complicated jsr-ret structures where we don't ret from
|
|
2006 |
// the subroutine can cause the objects on the monitor
|
|
2007 |
// stacks to not match because blocks can be parsed twice.
|
|
2008 |
// The only test case we've seen so far which exhibits this
|
|
2009 |
// problem is caught by the infinite recursion test in
|
|
2010 |
// GraphBuilder::jsr() if the join doesn't work.
|
|
2011 |
if (!entry->try_merge(s)) {
|
|
2012 |
BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
|
|
2013 |
}
|
|
2014 |
|
|
2015 |
// add current state for correct handling of phi functions at begin of xhandler
|
|
2016 |
int phi_operand = entry->add_exception_state(s);
|
|
2017 |
|
|
2018 |
// add entry to the list of xhandlers of this block
|
|
2019 |
_block->add_exception_handler(entry);
|
|
2020 |
|
|
2021 |
// add back-edge from xhandler entry to this block
|
|
2022 |
if (!entry->is_predecessor(_block)) {
|
|
2023 |
entry->add_predecessor(_block);
|
|
2024 |
}
|
|
2025 |
|
|
2026 |
// clone XHandler because phi_operand and scope_count can not be shared
|
|
2027 |
XHandler* new_xhandler = new XHandler(h);
|
|
2028 |
new_xhandler->set_phi_operand(phi_operand);
|
|
2029 |
new_xhandler->set_scope_count(scope_count);
|
|
2030 |
exception_handlers->append(new_xhandler);
|
|
2031 |
|
|
2032 |
// fill in exception handler subgraph lazily
|
|
2033 |
assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet");
|
|
2034 |
cur_scope_data->add_to_work_list(entry);
|
|
2035 |
|
|
2036 |
// stop when reaching catchall
|
|
2037 |
if (h->catch_type() == 0) {
|
|
2038 |
return exception_handlers;
|
|
2039 |
}
|
|
2040 |
}
|
|
2041 |
}
|
|
2042 |
|
|
2043 |
// Set up iteration for next time.
|
|
2044 |
// If parsing a jsr, do not grab exception handlers from the
|
|
2045 |
// parent scopes for this method (already got them, and they
|
|
2046 |
// needed to be cloned)
|
|
2047 |
if (cur_scope_data->parsing_jsr()) {
|
|
2048 |
IRScope* tmp_scope = cur_scope_data->scope();
|
|
2049 |
while (cur_scope_data->parent() != NULL &&
|
|
2050 |
cur_scope_data->parent()->scope() == tmp_scope) {
|
|
2051 |
cur_scope_data = cur_scope_data->parent();
|
|
2052 |
}
|
|
2053 |
}
|
|
2054 |
if (cur_scope_data != NULL) {
|
|
2055 |
if (cur_scope_data->parent() != NULL) {
|
|
2056 |
// must use pop_scope instead of caller_state to preserve all monitors
|
|
2057 |
s = s->pop_scope();
|
|
2058 |
}
|
|
2059 |
cur_bci = cur_scope_data->scope()->caller_bci();
|
|
2060 |
cur_scope_data = cur_scope_data->parent();
|
|
2061 |
scope_count++;
|
|
2062 |
}
|
|
2063 |
} while (cur_scope_data != NULL);
|
|
2064 |
|
|
2065 |
return exception_handlers;
|
|
2066 |
}
|
|
2067 |
|
|
2068 |
|
|
2069 |
// Helper class for simplifying Phis.
|
|
2070 |
class PhiSimplifier : public BlockClosure {
|
|
2071 |
private:
|
|
2072 |
bool _has_substitutions;
|
|
2073 |
Value simplify(Value v);
|
|
2074 |
|
|
2075 |
public:
|
|
2076 |
PhiSimplifier(BlockBegin* start) : _has_substitutions(false) {
|
|
2077 |
start->iterate_preorder(this);
|
|
2078 |
if (_has_substitutions) {
|
|
2079 |
SubstitutionResolver sr(start);
|
|
2080 |
}
|
|
2081 |
}
|
|
2082 |
void block_do(BlockBegin* b);
|
|
2083 |
bool has_substitutions() const { return _has_substitutions; }
|
|
2084 |
};
|
|
2085 |
|
|
2086 |
|
|
2087 |
Value PhiSimplifier::simplify(Value v) {
|
|
2088 |
Phi* phi = v->as_Phi();
|
|
2089 |
|
|
2090 |
if (phi == NULL) {
|
|
2091 |
// no phi function
|
|
2092 |
return v;
|
|
2093 |
} else if (v->has_subst()) {
|
|
2094 |
// already substituted; subst can be phi itself -> simplify
|
|
2095 |
return simplify(v->subst());
|
|
2096 |
} else if (phi->is_set(Phi::cannot_simplify)) {
|
|
2097 |
// already tried to simplify phi before
|
|
2098 |
return phi;
|
|
2099 |
} else if (phi->is_set(Phi::visited)) {
|
|
2100 |
// break cycles in phi functions
|
|
2101 |
return phi;
|
|
2102 |
} else if (phi->type()->is_illegal()) {
|
|
2103 |
// illegal phi functions are ignored anyway
|
|
2104 |
return phi;
|
|
2105 |
|
|
2106 |
} else {
|
|
2107 |
// mark phi function as processed to break cycles in phi functions
|
|
2108 |
phi->set(Phi::visited);
|
|
2109 |
|
|
2110 |
// simplify x = [y, x] and x = [y, y] to y
|
|
2111 |
Value subst = NULL;
|
|
2112 |
int opd_count = phi->operand_count();
|
|
2113 |
for (int i = 0; i < opd_count; i++) {
|
|
2114 |
Value opd = phi->operand_at(i);
|
|
2115 |
assert(opd != NULL, "Operand must exist!");
|
|
2116 |
|
|
2117 |
if (opd->type()->is_illegal()) {
|
|
2118 |
// if one operand is illegal, the entire phi function is illegal
|
|
2119 |
phi->make_illegal();
|
|
2120 |
phi->clear(Phi::visited);
|
|
2121 |
return phi;
|
|
2122 |
}
|
|
2123 |
|
|
2124 |
Value new_opd = simplify(opd);
|
|
2125 |
assert(new_opd != NULL, "Simplified operand must exist!");
|
|
2126 |
|
|
2127 |
if (new_opd != phi && new_opd != subst) {
|
|
2128 |
if (subst == NULL) {
|
|
2129 |
subst = new_opd;
|
|
2130 |
} else {
|
|
2131 |
// no simplification possible
|
|
2132 |
phi->set(Phi::cannot_simplify);
|
|
2133 |
phi->clear(Phi::visited);
|
|
2134 |
return phi;
|
|
2135 |
}
|
|
2136 |
}
|
|
2137 |
}
|
|
2138 |
|
|
2139 |
// successfully simplified phi function
|
|
2140 |
assert(subst != NULL, "illegal phi function");
|
|
2141 |
_has_substitutions = true;
|
|
2142 |
phi->clear(Phi::visited);
|
|
2143 |
phi->set_subst(subst);
|
|
2144 |
|
|
2145 |
#ifndef PRODUCT
|
|
2146 |
if (PrintPhiFunctions) {
|
|
2147 |
tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
|
|
2148 |
}
|
|
2149 |
#endif
|
|
2150 |
|
|
2151 |
return subst;
|
|
2152 |
}
|
|
2153 |
}
|
|
2154 |
|
|
2155 |
|
|
2156 |
void PhiSimplifier::block_do(BlockBegin* b) {
|
|
2157 |
for_each_phi_fun(b, phi,
|
|
2158 |
simplify(phi);
|
|
2159 |
);
|
|
2160 |
|
|
2161 |
#ifdef ASSERT
|
|
2162 |
for_each_phi_fun(b, phi,
|
|
2163 |
assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification");
|
|
2164 |
);
|
|
2165 |
|
|
2166 |
ValueStack* state = b->state()->caller_state();
|
|
2167 |
int index;
|
|
2168 |
Value value;
|
|
2169 |
for_each_state(state) {
|
|
2170 |
for_each_local_value(state, index, value) {
|
|
2171 |
Phi* phi = value->as_Phi();
|
|
2172 |
assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state");
|
|
2173 |
}
|
|
2174 |
}
|
|
2175 |
#endif
|
|
2176 |
}
|
|
2177 |
|
|
2178 |
// This method is called after all blocks are filled with HIR instructions
|
|
2179 |
// It eliminates all Phi functions of the form x = [y, y] and x = [y, x]
|
|
2180 |
void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) {
|
|
2181 |
PhiSimplifier simplifier(start);
|
|
2182 |
}
|
|
2183 |
|
|
2184 |
|
|
2185 |
void GraphBuilder::connect_to_end(BlockBegin* beg) {
|
|
2186 |
// setup iteration
|
|
2187 |
kill_all();
|
|
2188 |
_block = beg;
|
|
2189 |
_state = beg->state()->copy();
|
|
2190 |
_last = beg;
|
|
2191 |
iterate_bytecodes_for_block(beg->bci());
|
|
2192 |
}
|
|
2193 |
|
|
2194 |
|
|
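// Parse the bytecodes of the current block starting at bci and append the
// corresponding HIR. Parsing stops at the first BlockEnd or at the start of
// another block (adding a fall-through Goto if needed); the resulting end
// state is then merged into all successors.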
2195 |
BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
|
|
2196 |
#ifndef PRODUCT
|
|
2197 |
if (PrintIRDuringConstruction) {
|
|
2198 |
tty->cr();
|
|
2199 |
InstructionPrinter ip;
|
|
2200 |
ip.print_instr(_block); tty->cr();
|
|
2201 |
ip.print_stack(_block->state()); tty->cr();
|
|
2202 |
ip.print_inline_level(_block);
|
|
2203 |
ip.print_head();
|
|
2204 |
tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size());
|
|
2205 |
}
|
|
2206 |
#endif
|
|
2207 |
_skip_block = false;
|
|
2208 |
assert(state() != NULL, "ValueStack missing!");
|
|
2209 |
ciBytecodeStream s(method());
|
|
2210 |
s.reset_to_bci(bci);
|
|
2211 |
int prev_bci = bci;
|
|
2212 |
scope_data()->set_stream(&s);
|
|
2213 |
// iterate
|
|
2214 |
Bytecodes::Code code = Bytecodes::_illegal;
|
|
2215 |
bool push_exception = false;
|
|
2216 |
|
|
2217 |
if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == NULL) {
|
|
2218 |
// first thing in the exception entry block should be the exception object.
|
|
2219 |
push_exception = true;
|
|
2220 |
}
|
|
2221 |
|
|
2222 |
while (!bailed_out() && last()->as_BlockEnd() == NULL &&
|
|
2223 |
(code = stream()->next()) != ciBytecodeStream::EOBC() &&
|
|
2224 |
(block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
|
|
2225 |
|
|
2226 |
if (has_handler() && can_trap(method(), code)) {
|
|
2227 |
// copy the state because it is modified before handle_exception is called
|
|
2228 |
set_exception_state(state()->copy());
|
|
2229 |
} else {
|
|
2230 |
// handle_exception is not called for this bytecode
|
|
2231 |
set_exception_state(NULL);
|
|
2232 |
}
|
|
2233 |
|
|
2234 |
// Check for active jsr during OSR compilation
|
|
2235 |
if (compilation()->is_osr_compile()
|
|
2236 |
&& scope()->is_top_scope()
|
|
2237 |
&& parsing_jsr()
|
|
2238 |
&& s.cur_bci() == compilation()->osr_bci()) {
|
|
2239 |
bailout("OSR not supported while a jsr is active");
|
|
2240 |
}
|
|
2241 |
|
|
2242 |
if (push_exception) {
|
|
2243 |
apush(append(new ExceptionObject()));
|
|
2244 |
push_exception = false;
|
|
2245 |
}
|
|
2246 |
|
|
2247 |
// handle bytecode
|
|
2248 |
switch (code) {
|
|
2249 |
case Bytecodes::_nop : /* nothing to do */ break;
|
|
2250 |
case Bytecodes::_aconst_null : apush(append(new Constant(objectNull ))); break;
|
|
2251 |
case Bytecodes::_iconst_m1 : ipush(append(new Constant(new IntConstant (-1)))); break;
|
|
2252 |
case Bytecodes::_iconst_0 : ipush(append(new Constant(intZero ))); break;
|
|
2253 |
case Bytecodes::_iconst_1 : ipush(append(new Constant(intOne ))); break;
|
|
2254 |
case Bytecodes::_iconst_2 : ipush(append(new Constant(new IntConstant ( 2)))); break;
|
|
2255 |
case Bytecodes::_iconst_3 : ipush(append(new Constant(new IntConstant ( 3)))); break;
|
|
2256 |
case Bytecodes::_iconst_4 : ipush(append(new Constant(new IntConstant ( 4)))); break;
|
|
2257 |
case Bytecodes::_iconst_5 : ipush(append(new Constant(new IntConstant ( 5)))); break;
|
|
2258 |
case Bytecodes::_lconst_0 : lpush(append(new Constant(new LongConstant ( 0)))); break;
|
|
2259 |
case Bytecodes::_lconst_1 : lpush(append(new Constant(new LongConstant ( 1)))); break;
|
|
2260 |
case Bytecodes::_fconst_0 : fpush(append(new Constant(new FloatConstant ( 0)))); break;
|
|
2261 |
case Bytecodes::_fconst_1 : fpush(append(new Constant(new FloatConstant ( 1)))); break;
|
|
2262 |
case Bytecodes::_fconst_2 : fpush(append(new Constant(new FloatConstant ( 2)))); break;
|
|
2263 |
case Bytecodes::_dconst_0 : dpush(append(new Constant(new DoubleConstant( 0)))); break;
|
|
2264 |
case Bytecodes::_dconst_1 : dpush(append(new Constant(new DoubleConstant( 1)))); break;
|
|
2265 |
case Bytecodes::_bipush : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break;
|
|
2266 |
case Bytecodes::_sipush : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break;
|
|
2267 |
case Bytecodes::_ldc : // fall through
|
|
2268 |
case Bytecodes::_ldc_w : // fall through
|
|
2269 |
case Bytecodes::_ldc2_w : load_constant(); break;
|
|
2270 |
case Bytecodes::_iload : load_local(intType , s.get_index()); break;
|
|
2271 |
case Bytecodes::_lload : load_local(longType , s.get_index()); break;
|
|
2272 |
case Bytecodes::_fload : load_local(floatType , s.get_index()); break;
|
|
2273 |
case Bytecodes::_dload : load_local(doubleType , s.get_index()); break;
|
|
2274 |
case Bytecodes::_aload : load_local(instanceType, s.get_index()); break;
|
|
2275 |
case Bytecodes::_iload_0 : load_local(intType , 0); break;
|
|
2276 |
case Bytecodes::_iload_1 : load_local(intType , 1); break;
|
|
2277 |
case Bytecodes::_iload_2 : load_local(intType , 2); break;
|
|
2278 |
case Bytecodes::_iload_3 : load_local(intType , 3); break;
|
|
2279 |
case Bytecodes::_lload_0 : load_local(longType , 0); break;
|
|
2280 |
case Bytecodes::_lload_1 : load_local(longType , 1); break;
|
|
2281 |
case Bytecodes::_lload_2 : load_local(longType , 2); break;
|
|
2282 |
case Bytecodes::_lload_3 : load_local(longType , 3); break;
|
|
2283 |
case Bytecodes::_fload_0 : load_local(floatType , 0); break;
|
|
2284 |
case Bytecodes::_fload_1 : load_local(floatType , 1); break;
|
|
2285 |
case Bytecodes::_fload_2 : load_local(floatType , 2); break;
|
|
2286 |
case Bytecodes::_fload_3 : load_local(floatType , 3); break;
|
|
2287 |
case Bytecodes::_dload_0 : load_local(doubleType, 0); break;
|
|
2288 |
case Bytecodes::_dload_1 : load_local(doubleType, 1); break;
|
|
2289 |
case Bytecodes::_dload_2 : load_local(doubleType, 2); break;
|
|
2290 |
case Bytecodes::_dload_3 : load_local(doubleType, 3); break;
|
|
2291 |
case Bytecodes::_aload_0 : load_local(objectType, 0); break;
|
|
2292 |
case Bytecodes::_aload_1 : load_local(objectType, 1); break;
|
|
2293 |
case Bytecodes::_aload_2 : load_local(objectType, 2); break;
|
|
2294 |
case Bytecodes::_aload_3 : load_local(objectType, 3); break;
|
|
2295 |
case Bytecodes::_iaload : load_indexed(T_INT ); break;
|
|
2296 |
case Bytecodes::_laload : load_indexed(T_LONG ); break;
|
|
2297 |
case Bytecodes::_faload : load_indexed(T_FLOAT ); break;
|
|
2298 |
case Bytecodes::_daload : load_indexed(T_DOUBLE); break;
|
|
2299 |
case Bytecodes::_aaload : load_indexed(T_OBJECT); break;
|
|
2300 |
case Bytecodes::_baload : load_indexed(T_BYTE ); break;
|
|
2301 |
case Bytecodes::_caload : load_indexed(T_CHAR ); break;
|
|
2302 |
case Bytecodes::_saload : load_indexed(T_SHORT ); break;
|
|
2303 |
case Bytecodes::_istore : store_local(intType , s.get_index()); break;
|
|
2304 |
case Bytecodes::_lstore : store_local(longType , s.get_index()); break;
|
|
2305 |
case Bytecodes::_fstore : store_local(floatType , s.get_index()); break;
|
|
2306 |
case Bytecodes::_dstore : store_local(doubleType, s.get_index()); break;
|
|
2307 |
case Bytecodes::_astore : store_local(objectType, s.get_index()); break;
|
|
2308 |
case Bytecodes::_istore_0 : store_local(intType , 0); break;
|
|
2309 |
case Bytecodes::_istore_1 : store_local(intType , 1); break;
|
|
2310 |
case Bytecodes::_istore_2 : store_local(intType , 2); break;
|
|
2311 |
case Bytecodes::_istore_3 : store_local(intType , 3); break;
|
|
2312 |
case Bytecodes::_lstore_0 : store_local(longType , 0); break;
|
|
2313 |
case Bytecodes::_lstore_1 : store_local(longType , 1); break;
|
|
2314 |
case Bytecodes::_lstore_2 : store_local(longType , 2); break;
|
|
2315 |
case Bytecodes::_lstore_3 : store_local(longType , 3); break;
|
|
2316 |
case Bytecodes::_fstore_0 : store_local(floatType , 0); break;
|
|
2317 |
case Bytecodes::_fstore_1 : store_local(floatType , 1); break;
|
|
2318 |
case Bytecodes::_fstore_2 : store_local(floatType , 2); break;
|
|
2319 |
case Bytecodes::_fstore_3 : store_local(floatType , 3); break;
|
|
2320 |
case Bytecodes::_dstore_0 : store_local(doubleType, 0); break;
|
|
2321 |
case Bytecodes::_dstore_1 : store_local(doubleType, 1); break;
|
|
2322 |
case Bytecodes::_dstore_2 : store_local(doubleType, 2); break;
|
|
2323 |
case Bytecodes::_dstore_3 : store_local(doubleType, 3); break;
|
|
2324 |
case Bytecodes::_astore_0 : store_local(objectType, 0); break;
|
|
2325 |
case Bytecodes::_astore_1 : store_local(objectType, 1); break;
|
|
2326 |
case Bytecodes::_astore_2 : store_local(objectType, 2); break;
|
|
2327 |
case Bytecodes::_astore_3 : store_local(objectType, 3); break;
|
|
2328 |
case Bytecodes::_iastore : store_indexed(T_INT ); break;
|
|
2329 |
case Bytecodes::_lastore : store_indexed(T_LONG ); break;
|
|
2330 |
case Bytecodes::_fastore : store_indexed(T_FLOAT ); break;
|
|
2331 |
case Bytecodes::_dastore : store_indexed(T_DOUBLE); break;
|
|
2332 |
case Bytecodes::_aastore : store_indexed(T_OBJECT); break;
|
|
2333 |
case Bytecodes::_bastore : store_indexed(T_BYTE ); break;
|
|
2334 |
case Bytecodes::_castore : store_indexed(T_CHAR ); break;
|
|
2335 |
case Bytecodes::_sastore : store_indexed(T_SHORT ); break;
|
|
2336 |
case Bytecodes::_pop : // fall through
|
|
2337 |
case Bytecodes::_pop2 : // fall through
|
|
2338 |
case Bytecodes::_dup : // fall through
|
|
2339 |
case Bytecodes::_dup_x1 : // fall through
|
|
2340 |
case Bytecodes::_dup_x2 : // fall through
|
|
2341 |
case Bytecodes::_dup2 : // fall through
|
|
2342 |
case Bytecodes::_dup2_x1 : // fall through
|
|
2343 |
case Bytecodes::_dup2_x2 : // fall through
|
|
2344 |
case Bytecodes::_swap : stack_op(code); break;
|
|
2345 |
case Bytecodes::_iadd : arithmetic_op(intType , code); break;
|
|
2346 |
case Bytecodes::_ladd : arithmetic_op(longType , code); break;
|
|
2347 |
case Bytecodes::_fadd : arithmetic_op(floatType , code); break;
|
|
2348 |
case Bytecodes::_dadd : arithmetic_op(doubleType, code); break;
|
|
2349 |
case Bytecodes::_isub : arithmetic_op(intType , code); break;
|
|
2350 |
case Bytecodes::_lsub : arithmetic_op(longType , code); break;
|
|
2351 |
case Bytecodes::_fsub : arithmetic_op(floatType , code); break;
|
|
2352 |
case Bytecodes::_dsub : arithmetic_op(doubleType, code); break;
|
|
2353 |
case Bytecodes::_imul : arithmetic_op(intType , code); break;
|
|
2354 |
case Bytecodes::_lmul : arithmetic_op(longType , code); break;
|
|
2355 |
case Bytecodes::_fmul : arithmetic_op(floatType , code); break;
|
|
2356 |
case Bytecodes::_dmul : arithmetic_op(doubleType, code); break;
|
|
2357 |
case Bytecodes::_idiv : arithmetic_op(intType , code, lock_stack()); break;
|
|
2358 |
case Bytecodes::_ldiv : arithmetic_op(longType , code, lock_stack()); break;
|
|
2359 |
case Bytecodes::_fdiv : arithmetic_op(floatType , code); break;
|
|
2360 |
case Bytecodes::_ddiv : arithmetic_op(doubleType, code); break;
|
|
2361 |
case Bytecodes::_irem : arithmetic_op(intType , code, lock_stack()); break;
|
|
2362 |
case Bytecodes::_lrem : arithmetic_op(longType , code, lock_stack()); break;
|
|
2363 |
case Bytecodes::_frem : arithmetic_op(floatType , code); break;
|
|
2364 |
case Bytecodes::_drem : arithmetic_op(doubleType, code); break;
|
|
2365 |
case Bytecodes::_ineg : negate_op(intType ); break;
|
|
2366 |
case Bytecodes::_lneg : negate_op(longType ); break;
|
|
2367 |
case Bytecodes::_fneg : negate_op(floatType ); break;
|
|
2368 |
case Bytecodes::_dneg : negate_op(doubleType); break;
|
|
2369 |
case Bytecodes::_ishl : shift_op(intType , code); break;
|
|
2370 |
case Bytecodes::_lshl : shift_op(longType, code); break;
|
|
2371 |
case Bytecodes::_ishr : shift_op(intType , code); break;
|
|
2372 |
case Bytecodes::_lshr : shift_op(longType, code); break;
|
|
2373 |
case Bytecodes::_iushr : shift_op(intType , code); break;
|
|
2374 |
case Bytecodes::_lushr : shift_op(longType, code); break;
|
|
2375 |
case Bytecodes::_iand : logic_op(intType , code); break;
|
|
2376 |
case Bytecodes::_land : logic_op(longType, code); break;
|
|
2377 |
case Bytecodes::_ior : logic_op(intType , code); break;
|
|
2378 |
case Bytecodes::_lor : logic_op(longType, code); break;
|
|
2379 |
case Bytecodes::_ixor : logic_op(intType , code); break;
|
|
2380 |
case Bytecodes::_lxor : logic_op(longType, code); break;
|
|
2381 |
case Bytecodes::_iinc : increment(); break;
|
|
2382 |
case Bytecodes::_i2l : convert(code, T_INT , T_LONG ); break;
|
|
2383 |
case Bytecodes::_i2f : convert(code, T_INT , T_FLOAT ); break;
|
|
2384 |
case Bytecodes::_i2d : convert(code, T_INT , T_DOUBLE); break;
|
|
2385 |
case Bytecodes::_l2i : convert(code, T_LONG , T_INT ); break;
|
|
2386 |
case Bytecodes::_l2f : convert(code, T_LONG , T_FLOAT ); break;
|
|
2387 |
case Bytecodes::_l2d : convert(code, T_LONG , T_DOUBLE); break;
|
|
2388 |
case Bytecodes::_f2i : convert(code, T_FLOAT , T_INT ); break;
|
|
2389 |
case Bytecodes::_f2l : convert(code, T_FLOAT , T_LONG ); break;
|
|
2390 |
case Bytecodes::_f2d : convert(code, T_FLOAT , T_DOUBLE); break;
|
|
2391 |
case Bytecodes::_d2i : convert(code, T_DOUBLE, T_INT ); break;
|
|
2392 |
case Bytecodes::_d2l : convert(code, T_DOUBLE, T_LONG ); break;
|
|
2393 |
case Bytecodes::_d2f : convert(code, T_DOUBLE, T_FLOAT ); break;
|
|
2394 |
case Bytecodes::_i2b : convert(code, T_INT , T_BYTE ); break;
|
|
2395 |
case Bytecodes::_i2c : convert(code, T_INT , T_CHAR ); break;
|
|
2396 |
case Bytecodes::_i2s : convert(code, T_INT , T_SHORT ); break;
|
|
2397 |
case Bytecodes::_lcmp : compare_op(longType , code); break;
|
|
2398 |
case Bytecodes::_fcmpl : compare_op(floatType , code); break;
|
|
2399 |
case Bytecodes::_fcmpg : compare_op(floatType , code); break;
|
|
2400 |
case Bytecodes::_dcmpl : compare_op(doubleType, code); break;
|
|
2401 |
case Bytecodes::_dcmpg : compare_op(doubleType, code); break;
|
|
2402 |
case Bytecodes::_ifeq : if_zero(intType , If::eql); break;
|
|
2403 |
case Bytecodes::_ifne : if_zero(intType , If::neq); break;
|
|
2404 |
case Bytecodes::_iflt : if_zero(intType , If::lss); break;
|
|
2405 |
case Bytecodes::_ifge : if_zero(intType , If::geq); break;
|
|
2406 |
case Bytecodes::_ifgt : if_zero(intType , If::gtr); break;
|
|
2407 |
case Bytecodes::_ifle : if_zero(intType , If::leq); break;
|
|
2408 |
case Bytecodes::_if_icmpeq : if_same(intType , If::eql); break;
|
|
2409 |
case Bytecodes::_if_icmpne : if_same(intType , If::neq); break;
|
|
2410 |
case Bytecodes::_if_icmplt : if_same(intType , If::lss); break;
|
|
2411 |
case Bytecodes::_if_icmpge : if_same(intType , If::geq); break;
|
|
2412 |
case Bytecodes::_if_icmpgt : if_same(intType , If::gtr); break;
|
|
2413 |
case Bytecodes::_if_icmple : if_same(intType , If::leq); break;
|
|
2414 |
case Bytecodes::_if_acmpeq : if_same(objectType, If::eql); break;
|
|
2415 |
case Bytecodes::_if_acmpne : if_same(objectType, If::neq); break;
|
|
2416 |
case Bytecodes::_goto : _goto(s.cur_bci(), s.get_dest()); break;
|
|
2417 |
case Bytecodes::_jsr : jsr(s.get_dest()); break;
|
|
2418 |
case Bytecodes::_ret : ret(s.get_index()); break;
|
|
2419 |
case Bytecodes::_tableswitch : table_switch(); break;
|
|
2420 |
case Bytecodes::_lookupswitch : lookup_switch(); break;
|
|
2421 |
case Bytecodes::_ireturn : method_return(ipop()); break;
|
|
2422 |
case Bytecodes::_lreturn : method_return(lpop()); break;
|
|
2423 |
case Bytecodes::_freturn : method_return(fpop()); break;
|
|
2424 |
case Bytecodes::_dreturn : method_return(dpop()); break;
|
|
2425 |
case Bytecodes::_areturn : method_return(apop()); break;
|
|
2426 |
case Bytecodes::_return : method_return(NULL ); break;
|
|
2427 |
case Bytecodes::_getstatic : // fall through
|
|
2428 |
case Bytecodes::_putstatic : // fall through
|
|
2429 |
case Bytecodes::_getfield : // fall through
|
|
2430 |
case Bytecodes::_putfield : access_field(code); break;
|
|
2431 |
case Bytecodes::_invokevirtual : // fall through
|
|
2432 |
case Bytecodes::_invokespecial : // fall through
|
|
2433 |
case Bytecodes::_invokestatic : // fall through
|
|
2434 |
case Bytecodes::_invokeinterface: invoke(code); break;
|
|
2435 |
case Bytecodes::_xxxunusedxxx : ShouldNotReachHere(); break;
|
|
2436 |
case Bytecodes::_new : new_instance(s.get_index_big()); break;
|
|
2437 |
case Bytecodes::_newarray : new_type_array(); break;
|
|
2438 |
case Bytecodes::_anewarray : new_object_array(); break;
|
|
2439 |
case Bytecodes::_arraylength : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
|
|
2440 |
case Bytecodes::_athrow : throw_op(s.cur_bci()); break;
|
|
2441 |
case Bytecodes::_checkcast : check_cast(s.get_index_big()); break;
|
|
2442 |
case Bytecodes::_instanceof : instance_of(s.get_index_big()); break;
|
|
2443 |
// Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
|
|
2444 |
case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break;
|
|
2445 |
case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break;
|
|
2446 |
case Bytecodes::_wide : ShouldNotReachHere(); break;
|
|
2447 |
case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break;
|
|
2448 |
case Bytecodes::_ifnull : if_null(objectType, If::eql); break;
|
|
2449 |
case Bytecodes::_ifnonnull : if_null(objectType, If::neq); break;
|
|
2450 |
case Bytecodes::_goto_w : _goto(s.cur_bci(), s.get_far_dest()); break;
|
|
2451 |
case Bytecodes::_jsr_w : jsr(s.get_far_dest()); break;
|
|
2452 |
case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint", NULL);
|
|
2453 |
default : ShouldNotReachHere(); break;
|
|
2454 |
}
|
|
2455 |
// save current bci to setup Goto at the end
|
|
2456 |
prev_bci = s.cur_bci();
|
|
2457 |
}
|
|
2458 |
CHECK_BAILOUT_(NULL);
|
|
2459 |
// stop processing of this block (see try_inline_full)
|
|
2460 |
if (_skip_block) {
|
|
2461 |
_skip_block = false;
|
|
2462 |
assert(_last && _last->as_BlockEnd(), "");
|
|
2463 |
return _last->as_BlockEnd();
|
|
2464 |
}
|
|
2465 |
// check whether the last appended instruction is already a BlockEnd instruction
|
|
2466 |
BlockEnd* end = last()->as_BlockEnd();
|
|
2467 |
if (end == NULL) {
|
|
2468 |
// all blocks must end with a BlockEnd instruction => add a Goto
|
|
2469 |
end = new Goto(block_at(s.cur_bci()), false);
|
|
2470 |
_last = _last->set_next(end, prev_bci);
|
|
2471 |
}
|
|
2472 |
assert(end == last()->as_BlockEnd(), "inconsistency");
|
|
2473 |
|
|
2474 |
// if the method terminates, we don't need the stack anymore
|
|
2475 |
if (end->as_Return() != NULL) {
|
|
2476 |
state()->clear_stack();
|
|
2477 |
} else if (end->as_Throw() != NULL) {
|
|
2478 |
// May have exception handler in caller scopes
|
|
2479 |
state()->truncate_stack(scope()->lock_stack_size());
|
|
2480 |
}
|
|
2481 |
|
|
2482 |
// connect to begin & set state
|
|
2483 |
// NOTE that inlining may have changed the block we are parsing
|
|
2484 |
block()->set_end(end);
|
|
2485 |
end->set_state(state());
|
|
2486 |
// propagate state
|
|
2487 |
for (int i = end->number_of_sux() - 1; i >= 0; i--) {
|
|
2488 |
BlockBegin* sux = end->sux_at(i);
|
|
2489 |
assert(sux->is_predecessor(block()), "predecessor missing");
|
|
2490 |
// be careful, bailout if bytecodes are strange
|
|
2491 |
if (!sux->try_merge(state())) BAILOUT_("block join failed", NULL);
|
|
2492 |
scope_data()->add_to_work_list(end->sux_at(i));
|
|
2493 |
}
|
|
2494 |
|
|
2495 |
scope_data()->set_stream(NULL);
|
|
2496 |
|
|
2497 |
// done
|
|
2498 |
return end;
|
|
2499 |
}
|
|
2500 |
|
|
2501 |
|
|
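// Drive block parsing: repeatedly take blocks from the work list and fill
// them with HIR, setting up the OSR entry edge when the OSR entry block is
// about to be parsed, until the list is empty or compilation bails out.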
2502 |
void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) {
|
|
2503 |
do {
|
|
2504 |
if (start_in_current_block_for_inlining && !bailed_out()) {
|
|
2505 |
iterate_bytecodes_for_block(0);
|
|
2506 |
start_in_current_block_for_inlining = false;
|
|
2507 |
} else {
|
|
2508 |
BlockBegin* b;
|
|
2509 |
while ((b = scope_data()->remove_from_work_list()) != NULL) {
|
|
2510 |
if (!b->is_set(BlockBegin::was_visited_flag)) {
|
|
2511 |
if (b->is_set(BlockBegin::osr_entry_flag)) {
|
|
2512 |
// we're about to parse the osr entry block, so make sure
|
|
2513 |
// we set up the OSR edge leading into this block so that
|
|
2514 |
// Phis get setup correctly.
|
|
2515 |
setup_osr_entry_block();
|
|
2516 |
// this is no longer the osr entry block, so clear it.
|
|
2517 |
b->clear(BlockBegin::osr_entry_flag);
|
|
2518 |
}
|
|
2519 |
b->set(BlockBegin::was_visited_flag);
|
|
2520 |
connect_to_end(b);
|
|
2521 |
}
|
|
2522 |
}
|
|
2523 |
}
|
|
2524 |
} while (!bailed_out() && !scope_data()->is_work_list_empty());
|
|
2525 |
}
|
|
2526 |
|
|
2527 |
|
|
2528 |
bool GraphBuilder::_is_initialized = false;
|
|
2529 |
bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes];
|
|
2530 |
bool GraphBuilder::_is_async[Bytecodes::number_of_java_codes];
|
|
2531 |
|
|
2532 |
void GraphBuilder::initialize() {
  // make sure initialization happens only once (need a
  // lock here, if we allow the compiler to be re-entrant)
  if (is_initialized()) return;
  _is_initialized = true;

  // the following bytecodes are assumed to potentially
  // throw exceptions in compiled code - note that e.g.
  // monitorexit & the return bytecodes do not throw
  // exceptions since monitor pairing proved that they
  // succeed (if monitor pairing succeeded)
  Bytecodes::Code can_trap_list[] =
    { Bytecodes::_ldc
    , Bytecodes::_ldc_w
    , Bytecodes::_ldc2_w
    , Bytecodes::_iaload
    , Bytecodes::_laload
    , Bytecodes::_faload
    , Bytecodes::_daload
    , Bytecodes::_aaload
    , Bytecodes::_baload
    , Bytecodes::_caload
    , Bytecodes::_saload
    , Bytecodes::_iastore
    , Bytecodes::_lastore
    , Bytecodes::_fastore
    , Bytecodes::_dastore
    , Bytecodes::_aastore
    , Bytecodes::_bastore
    , Bytecodes::_castore
    , Bytecodes::_sastore
    , Bytecodes::_idiv
    , Bytecodes::_ldiv
    , Bytecodes::_irem
    , Bytecodes::_lrem
    , Bytecodes::_getstatic
    , Bytecodes::_putstatic
    , Bytecodes::_getfield
    , Bytecodes::_putfield
    , Bytecodes::_invokevirtual
    , Bytecodes::_invokespecial
    , Bytecodes::_invokestatic
    , Bytecodes::_invokeinterface
    , Bytecodes::_new
    , Bytecodes::_newarray
    , Bytecodes::_anewarray
    , Bytecodes::_arraylength
    , Bytecodes::_athrow
    , Bytecodes::_checkcast
    , Bytecodes::_instanceof
    , Bytecodes::_monitorenter
    , Bytecodes::_multianewarray
    };

  // the following bytecodes are assumed to potentially
  // throw asynchronous exceptions in compiled code due
  // to safepoints (note: these entries could be merged
  // with the can_trap_list - however, we need to know
  // which ones are asynchronous for now - see also the
  // comment in GraphBuilder::handle_exception)
  Bytecodes::Code is_async_list[] =
    { Bytecodes::_ifeq
    , Bytecodes::_ifne
    , Bytecodes::_iflt
    , Bytecodes::_ifge
    , Bytecodes::_ifgt
    , Bytecodes::_ifle
    , Bytecodes::_if_icmpeq
    , Bytecodes::_if_icmpne
    , Bytecodes::_if_icmplt
    , Bytecodes::_if_icmpge
    , Bytecodes::_if_icmpgt
    , Bytecodes::_if_icmple
    , Bytecodes::_if_acmpeq
    , Bytecodes::_if_acmpne
    , Bytecodes::_goto
    , Bytecodes::_jsr
    , Bytecodes::_ret
    , Bytecodes::_tableswitch
    , Bytecodes::_lookupswitch
    , Bytecodes::_ireturn
    , Bytecodes::_lreturn
    , Bytecodes::_freturn
    , Bytecodes::_dreturn
    , Bytecodes::_areturn
    , Bytecodes::_return
    , Bytecodes::_ifnull
    , Bytecodes::_ifnonnull
    , Bytecodes::_goto_w
    , Bytecodes::_jsr_w
    };

  // initialize trap tables
  for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
    _can_trap[i] = false;
    _is_async[i] = false;
  }
  // set standard trap info
  for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
    _can_trap[can_trap_list[j]] = true;
  }

  // We now deoptimize if an asynchronous exception is thrown. This
  // considerably cleans up corner case issues related to javac's
  // incorrect exception handler ranges for async exceptions and
  // allows us to precisely analyze the types of exceptions from
  // certain bytecodes.
  if (!(DeoptC1 && DeoptOnAsyncException)) {
    // set asynchronous trap info
    for (uint k = 0; k < ARRAY_SIZE(is_async_list); k++) {
      assert(!_can_trap[is_async_list[k]], "can_trap_list and is_async_list should be disjoint");
      _can_trap[is_async_list[k]] = true;
      _is_async[is_async_list[k]] = true;
    }
  }
}
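
// Example of how the tables above end up populated (illustrative only):
// after initialize(), _can_trap[Bytecodes::_getfield] is true because
// field accesses can throw in compiled code, while branch bytecodes such
// as Bytecodes::_goto are marked in _is_async (and added to _can_trap)
// only when !(DeoptC1 && DeoptOnAsyncException), i.e. when asynchronous
// exceptions are not handled by deoptimization.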


BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
  assert(entry->is_set(f), "entry/flag mismatch");
  // create header block
  BlockBegin* h = new BlockBegin(entry->bci());
  h->set_depth_first_number(0);

  Value l = h;
  if (profile_branches()) {
    // Increment the invocation count on entry to the method. We
    // can't use profile_invocation here because append isn't set up to
    // work properly at this point. The instructions have to be
    // appended to the instruction stream by hand.
    Value m = new Constant(new ObjectConstant(compilation()->method()));
    h->set_next(m, 0);
    Value p = new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1);
    m->set_next(p, 0);
    l = p;
  }

  BlockEnd* g = new Goto(entry, false);
  l->set_next(g, entry->bci());
  h->set_end(g);
  h->set(f);
  // setup header block end state
  ValueStack* s = state->copy(); // can use copy since stack is empty (=> no phis)
  assert(s->stack_is_empty(), "must have empty stack at entry point");
  g->set_state(s);
  return h;
}


BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
  BlockBegin* start = new BlockBegin(0);

  // This code eliminates the empty start block at the beginning of
  // each method. Previously, each method started with the
  // start-block created below, and this block was followed by the
  // header block that was always empty. This header block is only
  // necessary if std_entry is also a backward branch target because
  // then phi functions may be necessary in the header block. It's
  // also necessary when profiling so that there's a single block that
  // can increment the interpreter_invocation_count.
  BlockBegin* new_header_block;
  if (std_entry->number_of_preds() == 0 && !profile_branches()) {
    new_header_block = std_entry;
  } else {
    new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
  }

  // setup start block (root for the IR graph)
  Base* base =
    new Base(
      new_header_block,
      osr_entry
    );
  start->set_next(base, 0);
  start->set_end(base);
  // create & setup state for start block
  start->set_state(state->copy());
  base->set_state(state->copy());

  if (base->std_entry()->state() == NULL) {
    // setup states for header blocks
    base->std_entry()->merge(state);
  }

  assert(base->std_entry()->state() != NULL, "");
  return start;
}


void GraphBuilder::setup_osr_entry_block() {
  assert(compilation()->is_osr_compile(), "only for osrs");

  int osr_bci = compilation()->osr_bci();
  ciBytecodeStream s(method());
  s.reset_to_bci(osr_bci);
  s.next();
  scope_data()->set_stream(&s);

  // create a new block to be the osr setup code
  _osr_entry = new BlockBegin(osr_bci);
  _osr_entry->set(BlockBegin::osr_entry_flag);
  _osr_entry->set_depth_first_number(0);
  BlockBegin* target = bci2block()->at(osr_bci);
  assert(target != NULL && target->is_set(BlockBegin::osr_entry_flag), "must be there");
  // the osr entry has no values for locals
  ValueStack* state = target->state()->copy();
  _osr_entry->set_state(state);

  kill_all();
  _block = _osr_entry;
  _state = _osr_entry->state()->copy();
  _last  = _osr_entry;
  Value e = append(new OsrEntry());
  e->set_needs_null_check(false);

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.

  int index;
  Value local;

  // find all the locals that the interpreter thinks contain live oops
  const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci);

  // compute the offset into the locals so that we can treat the buffer
  // as if the locals were still in the interpreter frame
  int locals_offset = BytesPerWord * (method()->max_locals() - 1);
  for_each_local_value(state, index, local) {
    int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
    Value get;
    if (local->type()->is_object_kind() && !live_oops.at(index)) {
      // The interpreter thinks this local is dead but the compiler
      // doesn't, so pretend that the interpreter passed in null.
      get = append(new Constant(objectNull));
    } else {
      get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
                                    append(new Constant(new IntConstant(offset))),
                                    0,
                                    true));
    }
    _state->store_local(index, get);
  }

  // the storage for the OSR buffer is freed manually in the LIRGenerator.

  assert(state->caller_state() == NULL, "should be top scope");
  state->clear_locals();
  Goto* g = new Goto(target, false);
  g->set_state(_state->copy());
  append(g);
  _osr_entry->set_end(g);
  target->merge(_osr_entry->end()->state());

  scope_data()->set_stream(NULL);
}
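
// Worked example of the offset computation above (illustrative only,
// assuming a 64-bit build where BytesPerWord == 8 and max_locals == 4):
//   locals_offset = 8 * (4 - 1) = 24
//   an int in local slot 1    -> offset = 24 - (1 + 1 - 1) * 8 = 16
//   a long in local slots 2/3 -> offset = 24 - (2 + 2 - 1) * 8 = 0
// i.e. local 0 sits at the highest offset in the OSR buffer and the
// highest-index local ends up at offset 0, mirroring the layout
// described in the comment block above.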


ValueStack* GraphBuilder::state_at_entry() {
  ValueStack* state = new ValueStack(scope(), method()->max_locals(), method()->max_stack());

  // Set up locals for receiver
  int idx = 0;
  if (!method()->is_static()) {
    // we should always see the receiver
    state->store_local(idx, new Local(objectType, idx));
    idx = 1;
  }

  // Set up locals for incoming arguments
  ciSignature* sig = method()->signature();
  for (int i = 0; i < sig->count(); i++) {
    ciType* type = sig->type_at(i);
    BasicType basic_type = type->basic_type();
    // don't allow T_ARRAY to propagate into locals types
    if (basic_type == T_ARRAY) basic_type = T_OBJECT;
    ValueType* vt = as_ValueType(basic_type);
    state->store_local(idx, new Local(vt, idx));
    idx += type->size();
  }

  // lock synchronized method
  if (method()->is_synchronized()) {
    state->lock(scope(), NULL);
  }

  return state;
}
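
// Example of the resulting local numbering (illustrative only): for an
// instance method with signature (JI)V the receiver occupies local 0,
// the long argument occupies locals 1 and 2 (idx advances by its size),
// and the int argument lands in local 3 - matching the interpreter's
// local variable slot assignment.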


GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
  : _scope_data(NULL)
  , _exception_state(NULL)
  , _instruction_count(0)
  , _osr_entry(NULL)
  , _memory(new MemoryBuffer())
  , _compilation(compilation)
  , _inline_bailout_msg(NULL)
{
  int osr_bci = compilation->osr_bci();

  // determine entry points and bci2block mapping
  BlockListBuilder blm(compilation, scope, osr_bci);
  CHECK_BAILOUT();

  BlockList* bci2block = blm.bci2block();
  BlockBegin* start_block = bci2block->at(0);

  assert(is_initialized(), "GraphBuilder must have been initialized");
  push_root_scope(scope, bci2block, start_block);

  // setup state for std entry
  _initial_state = state_at_entry();
  start_block->merge(_initial_state);

  BlockBegin* sync_handler = NULL;
  if (method()->is_synchronized() || DTraceMethodProbes) {
    // setup an exception handler to do the unlocking and/or notification
    sync_handler = new BlockBegin(-1);
    sync_handler->set(BlockBegin::exception_entry_flag);
    sync_handler->set(BlockBegin::is_on_work_list_flag);
    sync_handler->set(BlockBegin::default_exception_handler_flag);

    ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
    XHandler* h = new XHandler(desc);
    h->set_entry_block(sync_handler);
    scope_data()->xhandlers()->append(h);
    scope_data()->set_has_handler();
  }

  // complete graph
  _vmap = new ValueMap();
  scope->compute_lock_stack_size();
  switch (scope->method()->intrinsic_id()) {
  case vmIntrinsics::_dabs   : // fall through
  case vmIntrinsics::_dsqrt  : // fall through
  case vmIntrinsics::_dsin   : // fall through
  case vmIntrinsics::_dcos   : // fall through
  case vmIntrinsics::_dtan   : // fall through
  case vmIntrinsics::_dlog   : // fall through
  case vmIntrinsics::_dlog10 : // fall through
    {
      // Compiles where the root method is an intrinsic need a special
      // compilation environment because the bytecodes for the method
      // shouldn't be parsed during the compilation, only the special
      // Intrinsic node should be emitted. If this isn't done the
      // code for the inlined version will be different than the root
      // compiled version which could lead to monotonicity problems on
      // intel.

      // Set up a stream so that appending instructions works properly.
      ciBytecodeStream s(scope->method());
      s.reset_to_bci(0);
      scope_data()->set_stream(&s);
      s.next();

      // setup the initial block state
      _block = start_block;
      _state = start_block->state()->copy();
      _last  = start_block;
      load_local(doubleType, 0);

      // Emit the intrinsic node.
      bool result = try_inline_intrinsics(scope->method());
      if (!result) BAILOUT("failed to inline intrinsic");
      method_return(dpop());

      // connect the begin and end blocks and we're all done.
      BlockEnd* end = last()->as_BlockEnd();
      block()->set_end(end);
      end->set_state(state());
      break;
    }
  default:
    scope_data()->add_to_work_list(start_block);
    iterate_all_blocks();
    break;
  }
  CHECK_BAILOUT();

  if (sync_handler && sync_handler->state() != NULL) {
    Value lock = NULL;
    if (method()->is_synchronized()) {
      lock = method()->is_static() ? new Constant(new InstanceConstant(method()->holder()->java_mirror())) :
                                     _initial_state->local_at(0);

      sync_handler->state()->unlock();
      sync_handler->state()->lock(scope, lock);

    }
    fill_sync_handler(lock, sync_handler, true);
  }

  _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);

  eliminate_redundant_phis(_start);

  NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats());
  // for osr compile, bailout if some requirements are not fulfilled
  if (osr_bci != -1) {
    BlockBegin* osr_block = blm.bci2block()->at(osr_bci);
    assert(osr_block->is_set(BlockBegin::was_visited_flag), "osr entry must have been visited for osr compile");

    // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
    if (!osr_block->state()->stack_is_empty()) {
      BAILOUT("stack not empty at OSR entry point");
    }
  }
#ifndef PRODUCT
  if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count);
#endif
}
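
// Summary of the construction sequence above (descriptive comment only):
// BlockListBuilder computes the bci -> block mapping, state_at_entry()
// seeds the locals for the standard entry, the work-list driven
// iterate_all_blocks() (or the intrinsic shortcut) fills in the graph,
// and setup_start_block() finally wraps everything behind the Base node
// that roots the IR.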


ValueStack* GraphBuilder::lock_stack() {
  // return a new ValueStack representing just the current lock stack
  // (for debug info at safepoints in exception throwing or handling)
  ValueStack* new_stack = state()->copy_locks();
  return new_stack;
}


int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
  int recur_level = 0;
  for (IRScope* s = scope(); s != NULL; s = s->caller()) {
    if (s->method() == cur_callee) {
      ++recur_level;
    }
  }
  return recur_level;
}
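
// For instance (illustrative only): if the current scope chain is
// A -> B -> A (A called B, which called A again, and we are now looking
// at inlining B), recursive_inline_level(B) returns 1 and
// recursive_inline_level(A) returns 2, since the walk simply counts
// scopes whose method matches the candidate callee.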


bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
  // Clear out any existing inline bailout condition
  clear_inline_bailout();

  if (callee->should_exclude()) {
    // callee is excluded
    INLINE_BAILOUT("excluded by CompilerOracle")
  } else if (!callee->can_be_compiled()) {
    // callee is not compilable (prob. has breakpoints)
    INLINE_BAILOUT("not compilable")
  } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
    // intrinsics can be native or not
    return true;
  } else if (callee->is_native()) {
    // non-intrinsic natives cannot be inlined
    INLINE_BAILOUT("non-intrinsic native")
  } else if (callee->is_abstract()) {
    INLINE_BAILOUT("abstract")
  } else {
    return try_inline_full(callee, holder_known);
  }
}


bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
  if (!InlineNatives)            INLINE_BAILOUT("intrinsic method inlining disabled");
  if (callee->is_synchronized()) INLINE_BAILOUT("intrinsic method is synchronized");
  // callee seems like a good candidate
  // determine id
  bool preserves_state = false;
  bool cantrap = true;
  vmIntrinsics::ID id = callee->intrinsic_id();
  switch (id) {
  case vmIntrinsics::_arraycopy :
    if (!InlineArrayCopy) return false;
    break;

  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
    preserves_state = true;
    cantrap = false;
    break;

  case vmIntrinsics::_floatToRawIntBits :
  case vmIntrinsics::_intBitsToFloat :
  case vmIntrinsics::_doubleToRawLongBits :
  case vmIntrinsics::_longBitsToDouble :
    if (!InlineMathNatives) return false;
    preserves_state = true;
    cantrap = false;
    break;

  case vmIntrinsics::_getClass :
    if (!InlineClassNatives) return false;
    preserves_state = true;
    break;

  case vmIntrinsics::_currentThread :
    if (!InlineThreadNatives) return false;
    preserves_state = true;
    cantrap = false;
    break;

  case vmIntrinsics::_dabs   : // fall through
  case vmIntrinsics::_dsqrt  : // fall through
  case vmIntrinsics::_dsin   : // fall through
  case vmIntrinsics::_dcos   : // fall through
  case vmIntrinsics::_dtan   : // fall through
  case vmIntrinsics::_dlog   : // fall through
  case vmIntrinsics::_dlog10 : // fall through
    if (!InlineMathNatives) return false;
    cantrap = false;
    preserves_state = true;
    break;

  // sun/misc/AtomicLong.attemptUpdate
  case vmIntrinsics::_attemptUpdate :
    if (!VM_Version::supports_cx8()) return false;
    if (!InlineAtomicLong) return false;
    preserves_state = true;
    break;

  // Use special nodes for Unsafe instructions so we can more easily
  // perform an address-mode optimization on the raw variants
  case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT, false);
  case vmIntrinsics::_getBoolean: return append_unsafe_get_obj(callee, T_BOOLEAN, false);
  case vmIntrinsics::_getByte   : return append_unsafe_get_obj(callee, T_BYTE, false);
  case vmIntrinsics::_getShort  : return append_unsafe_get_obj(callee, T_SHORT, false);
  case vmIntrinsics::_getChar   : return append_unsafe_get_obj(callee, T_CHAR, false);
  case vmIntrinsics::_getInt    : return append_unsafe_get_obj(callee, T_INT, false);
  case vmIntrinsics::_getLong   : return append_unsafe_get_obj(callee, T_LONG, false);
  case vmIntrinsics::_getFloat  : return append_unsafe_get_obj(callee, T_FLOAT, false);
  case vmIntrinsics::_getDouble : return append_unsafe_get_obj(callee, T_DOUBLE, false);

  case vmIntrinsics::_putObject : return append_unsafe_put_obj(callee, T_OBJECT, false);
  case vmIntrinsics::_putBoolean: return append_unsafe_put_obj(callee, T_BOOLEAN, false);
  case vmIntrinsics::_putByte   : return append_unsafe_put_obj(callee, T_BYTE, false);
  case vmIntrinsics::_putShort  : return append_unsafe_put_obj(callee, T_SHORT, false);
  case vmIntrinsics::_putChar   : return append_unsafe_put_obj(callee, T_CHAR, false);
  case vmIntrinsics::_putInt    : return append_unsafe_put_obj(callee, T_INT, false);
  case vmIntrinsics::_putLong   : return append_unsafe_put_obj(callee, T_LONG, false);
  case vmIntrinsics::_putFloat  : return append_unsafe_put_obj(callee, T_FLOAT, false);
  case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE, false);

  case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT, true);
  case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
  case vmIntrinsics::_getByteVolatile   : return append_unsafe_get_obj(callee, T_BYTE, true);
  case vmIntrinsics::_getShortVolatile  : return append_unsafe_get_obj(callee, T_SHORT, true);
  case vmIntrinsics::_getCharVolatile   : return append_unsafe_get_obj(callee, T_CHAR, true);
  case vmIntrinsics::_getIntVolatile    : return append_unsafe_get_obj(callee, T_INT, true);
  case vmIntrinsics::_getLongVolatile   : return append_unsafe_get_obj(callee, T_LONG, true);
  case vmIntrinsics::_getFloatVolatile  : return append_unsafe_get_obj(callee, T_FLOAT, true);
  case vmIntrinsics::_getDoubleVolatile : return append_unsafe_get_obj(callee, T_DOUBLE, true);

  case vmIntrinsics::_putObjectVolatile : return append_unsafe_put_obj(callee, T_OBJECT, true);
  case vmIntrinsics::_putBooleanVolatile: return append_unsafe_put_obj(callee, T_BOOLEAN, true);
  case vmIntrinsics::_putByteVolatile   : return append_unsafe_put_obj(callee, T_BYTE, true);
  case vmIntrinsics::_putShortVolatile  : return append_unsafe_put_obj(callee, T_SHORT, true);
  case vmIntrinsics::_putCharVolatile   : return append_unsafe_put_obj(callee, T_CHAR, true);
  case vmIntrinsics::_putIntVolatile    : return append_unsafe_put_obj(callee, T_INT, true);
  case vmIntrinsics::_putLongVolatile   : return append_unsafe_put_obj(callee, T_LONG, true);
  case vmIntrinsics::_putFloatVolatile  : return append_unsafe_put_obj(callee, T_FLOAT, true);
  case vmIntrinsics::_putDoubleVolatile : return append_unsafe_put_obj(callee, T_DOUBLE, true);

  case vmIntrinsics::_getByte_raw   : return append_unsafe_get_raw(callee, T_BYTE);
  case vmIntrinsics::_getShort_raw  : return append_unsafe_get_raw(callee, T_SHORT);
  case vmIntrinsics::_getChar_raw   : return append_unsafe_get_raw(callee, T_CHAR);
  case vmIntrinsics::_getInt_raw    : return append_unsafe_get_raw(callee, T_INT);
  case vmIntrinsics::_getLong_raw   : return append_unsafe_get_raw(callee, T_LONG);
  case vmIntrinsics::_getFloat_raw  : return append_unsafe_get_raw(callee, T_FLOAT);
  case vmIntrinsics::_getDouble_raw : return append_unsafe_get_raw(callee, T_DOUBLE);

  case vmIntrinsics::_putByte_raw   : return append_unsafe_put_raw(callee, T_BYTE);
  case vmIntrinsics::_putShort_raw  : return append_unsafe_put_raw(callee, T_SHORT);
  case vmIntrinsics::_putChar_raw   : return append_unsafe_put_raw(callee, T_CHAR);
  case vmIntrinsics::_putInt_raw    : return append_unsafe_put_raw(callee, T_INT);
  case vmIntrinsics::_putLong_raw   : return append_unsafe_put_raw(callee, T_LONG);
  case vmIntrinsics::_putFloat_raw  : return append_unsafe_put_raw(callee, T_FLOAT);
  case vmIntrinsics::_putDouble_raw : return append_unsafe_put_raw(callee, T_DOUBLE);

  case vmIntrinsics::_prefetchRead        : return append_unsafe_prefetch(callee, false, false);
  case vmIntrinsics::_prefetchWrite       : return append_unsafe_prefetch(callee, false, true);
  case vmIntrinsics::_prefetchReadStatic  : return append_unsafe_prefetch(callee, true, false);
  case vmIntrinsics::_prefetchWriteStatic : return append_unsafe_prefetch(callee, true, true);

  case vmIntrinsics::_checkIndex :
    if (!InlineNIOCheckIndex) return false;
    preserves_state = true;
    break;
  case vmIntrinsics::_putOrderedObject : return append_unsafe_put_obj(callee, T_OBJECT, true);
  case vmIntrinsics::_putOrderedInt    : return append_unsafe_put_obj(callee, T_INT, true);
  case vmIntrinsics::_putOrderedLong   : return append_unsafe_put_obj(callee, T_LONG, true);

  case vmIntrinsics::_compareAndSwapLong:
    if (!VM_Version::supports_cx8()) return false;
    // fall through
  case vmIntrinsics::_compareAndSwapInt:
  case vmIntrinsics::_compareAndSwapObject:
    append_unsafe_CAS(callee);
    return true;

  default : return false; // do not inline
  }
  // create intrinsic node
  const bool has_receiver = !callee->is_static();
  ValueType* result_type = as_ValueType(callee->return_type());

  Values* args = state()->pop_arguments(callee->arg_size());
  ValueStack* locks = lock_stack();
  if (profile_calls()) {
    // Don't profile in the special case where the root method
    // is the intrinsic
    if (callee != method()) {
      Value recv = NULL;
      if (has_receiver) {
        recv = args->at(0);
        null_check(recv);
      }
      profile_call(recv, NULL);
    }
  }

  Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, lock_stack(),
                                    preserves_state, cantrap);
  // append instruction & push result
  Value value = append_split(result);
  if (result_type != voidType) push(result_type, value);

#ifndef PRODUCT
  // printing
  if (PrintInlining) {
    print_inline_result(callee, true);
  }
#endif

  // done
  return true;
}
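
// Example of how the switch above dispatches (illustrative only): a call
// to sun.misc.Unsafe.getInt(Object, long) carries vmIntrinsics::_getInt
// and is turned directly into append_unsafe_get_obj(callee, T_INT, false),
// i.e. an UnsafeGetObject node, instead of the generic Intrinsic node
// built at the end of this method.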


bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) {
  // Introduce a new callee continuation point - all Ret instructions
  // will be replaced with Gotos to this point.
  BlockBegin* cont = block_at(next_bci());
  assert(cont != NULL, "continuation must exist (BlockListBuilder starts a new block after a jsr)");

  // Note: can not assign state to continuation yet, as we have to
  // pick up the state from the Ret instructions.

  // Push callee scope
  push_scope_for_jsr(cont, jsr_dest_bci);

  // Temporarily set up bytecode stream so we can append instructions
  // (only using the bci of this stream)
  scope_data()->set_stream(scope_data()->parent()->stream());

  BlockBegin* jsr_start_block = block_at(jsr_dest_bci);
  assert(jsr_start_block != NULL, "jsr start block must exist");
  assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
  Goto* goto_sub = new Goto(jsr_start_block, false);
  goto_sub->set_state(state());
  // Must copy state to avoid wrong sharing when parsing bytecodes
  assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block");
  jsr_start_block->set_state(state()->copy());
  append(goto_sub);
  _block->set_end(goto_sub);
  _last = _block = jsr_start_block;

  // Clear out bytecode stream
  scope_data()->set_stream(NULL);

  scope_data()->add_to_work_list(jsr_start_block);

  // Ready to resume parsing in subroutine
  iterate_all_blocks();

  // If we bailed out during parsing, return immediately (this is bad news)
  CHECK_BAILOUT_(false);

  // Detect whether the continuation can actually be reached. If not,
  // it has not had state set by the join() operations in
  // iterate_bytecodes_for_block()/ret() and we should not touch the
  // iteration state. The calling activation of
  // iterate_bytecodes_for_block will then complete normally.
  if (cont->state() != NULL) {
    if (!cont->is_set(BlockBegin::was_visited_flag)) {
      // add continuation to work list instead of parsing it immediately
      scope_data()->parent()->add_to_work_list(cont);
    }
  }

  assert(jsr_continuation() == cont, "continuation must not have changed");
  assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) ||
         jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag),
         "continuation can only be visited in case of backward branches");
  assert(_last && _last->as_BlockEnd(), "block must have end");

  // continuation is in work list, so end iteration of current block
  _skip_block = true;
  pop_scope_for_jsr();

  return true;
}


// Inline the entry of a synchronized method as a monitor enter and
// register the exception handler which releases the monitor if an
// exception is thrown within the callee. Note that the monitor enter
// cannot throw an exception itself, because the receiver is
// guaranteed to be non-null by the explicit null check at the
// beginning of inlining.
void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
  assert(lock != NULL && sync_handler != NULL, "lock or handler missing");

  set_exception_state(state()->copy());
  monitorenter(lock, SynchronizationEntryBCI);
  assert(_last->as_MonitorEnter() != NULL, "monitor enter expected");
  _last->set_needs_null_check(false);

  sync_handler->set(BlockBegin::exception_entry_flag);
  sync_handler->set(BlockBegin::is_on_work_list_flag);

  ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
  XHandler* h = new XHandler(desc);
  h->set_entry_block(sync_handler);
  scope_data()->xhandlers()->append(h);
  scope_data()->set_has_handler();
}


// If an exception is thrown and not handled within an inlined
// synchronized method, the monitor must be released before the
// exception is rethrown in the outer scope. Generate the appropriate
// instructions here.
void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) {
  BlockBegin* orig_block = _block;
  ValueStack* orig_state = _state;
  Instruction* orig_last = _last;
  _last = _block = sync_handler;
  _state = sync_handler->state()->copy();

  assert(sync_handler != NULL, "handler missing");
  assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here");

  assert(lock != NULL || default_handler, "lock or handler missing");

  XHandler* h = scope_data()->xhandlers()->remove_last();
  assert(h->entry_block() == sync_handler, "corrupt list of handlers");

  block()->set(BlockBegin::was_visited_flag);
  Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI);
  assert(exception->is_pinned(), "must be");

  int bci = SynchronizationEntryBCI;
  if (lock) {
    assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
    if (lock->bci() == -99) {
      lock = append_with_bci(lock, -1);
    }

    // exit the monitor in the context of the synchronized method
    monitorexit(lock, SynchronizationEntryBCI);

    // exit the context of the synchronized method
    if (!default_handler) {
      pop_scope();
      _state = _state->copy();
      bci = _state->scope()->caller_bci();
      _state = _state->pop_scope()->copy();
    }
  }

  // perform the throw as if at the call site
  apush(exception);

  set_exception_state(state()->copy());
  throw_op(bci);

  BlockEnd* end = last()->as_BlockEnd();
  block()->set_end(end);
  end->set_state(state());

  _block = orig_block;
  _state = orig_state;
  _last = orig_last;
}


bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  assert(!callee->is_native(), "callee must not be native");

  // first perform tests of things it's not possible to inline
  if (callee->has_exception_handlers() &&
      !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
  if (callee->is_synchronized() &&
      !InlineSynchronizedMethods) INLINE_BAILOUT("callee is synchronized");
  if (!callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet");
  if (!callee->has_balanced_monitors()) INLINE_BAILOUT("callee's monitors do not match");

  // Proper inlining of methods with jsrs requires a little more work.
  if (callee->has_jsrs()) INLINE_BAILOUT("jsrs not handled properly by inliner yet");

  // now perform tests that are based on flag settings
  if (inline_level() > MaxInlineLevel) INLINE_BAILOUT("too-deep inlining");
  if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
  if (callee->code_size() > max_inline_size()) INLINE_BAILOUT("callee is too large");

  // don't inline throwable methods unless the inlining tree is rooted in a throwable class
  if (callee->name() == ciSymbol::object_initializer_name() &&
      callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
    // Throwable constructor call
    IRScope* top = scope();
    while (top->caller() != NULL) {
      top = top->caller();
    }
    if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
      INLINE_BAILOUT("don't inline Throwable constructors");
    }
  }

  // When SSE2 is used on intel, no special handling is needed for strictfp
  // because the enum-constant is fixed at compile time; otherwise the
  // UseSSE < 2 check below is needed.
  if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) {
    INLINE_BAILOUT("caller and callee have different strict fp requirements");
  }

  if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
    INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
  }

#ifndef PRODUCT
  // printing
  if (PrintInlining) {
    print_inline_result(callee, true);
  }
#endif

  // NOTE: Bailouts from this point on, which occur at the
  // GraphBuilder level, do not cause bailout just of the inlining but
  // in fact of the entire compilation.

  BlockBegin* orig_block = block();

  const int args_base = state()->stack_size() - callee->arg_size();
  assert(args_base >= 0, "stack underflow during inlining");

  // Insert null check if necessary
  Value recv = NULL;
  if (code() != Bytecodes::_invokestatic) {
    // note: null check must happen even if first instruction of callee does
    // an implicit null check since the callee is in a different scope
    // and we must make sure exception handling does the right thing
    assert(!callee->is_static(), "callee must not be static");
    assert(callee->arg_size() > 0, "must have at least a receiver");
    recv = state()->stack_at(args_base);
    null_check(recv);
  }

  if (profile_inlined_calls()) {
    profile_call(recv, holder_known ? callee->holder() : NULL);
  }

  profile_invocation(callee);

  // Introduce a new callee continuation point - if the callee has
  // more than one return instruction or the return does not allow
  // fall-through of control flow, all return instructions of the
  // callee will need to be replaced by Goto's pointing to this
  // continuation point.
  BlockBegin* cont = block_at(next_bci());
  bool continuation_existed = true;
  if (cont == NULL) {
    cont = new BlockBegin(next_bci());
    // low number so that continuation gets parsed as early as possible
    cont->set_depth_first_number(0);
#ifndef PRODUCT
    if (PrintInitialBlockList) {
      tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
                    cont->block_id(), cont->bci(), bci());
    }
#endif
    continuation_existed = false;
  }
  // Record number of predecessors of continuation block before
  // inlining, to detect if inlined method has edges to its
  // continuation after inlining.
  int continuation_preds = cont->number_of_preds();

  // Push callee scope
  push_scope(callee, cont);

  // the BlockListBuilder for the callee could have bailed out
  CHECK_BAILOUT_(false);

  // Temporarily set up bytecode stream so we can append instructions
  // (only using the bci of this stream)
  scope_data()->set_stream(scope_data()->parent()->stream());

  // Pass parameters into callee state: add assignments
  // note: this will also ensure that all arguments are computed before being passed
  ValueStack* callee_state = state();
  ValueStack* caller_state = scope()->caller_state();
  { int i = args_base;
    while (i < caller_state->stack_size()) {
      const int par_no = i - args_base;
      Value arg = caller_state->stack_at_inc(i);
      // NOTE: take base() of arg->type() to avoid problems storing
      // constants
      store_local(callee_state, arg, arg->type()->base(), par_no);
    }
  }

  // Remove args from stack.
  // Note that we preserve locals state in case we can use it later
  // (see use of pop_scope() below)
  caller_state->truncate_stack(args_base);
  callee_state->truncate_stack(args_base);

  // Setup state that is used at returns from the inlined method.
  // This is essentially the state of the continuation block, but
  // without the return value on the stack; the return value, if any,
  // is pushed at the return instruction (see method_return).
  scope_data()->set_continuation_state(caller_state->copy());

  // Compute lock stack size for callee scope now that args have been passed
  scope()->compute_lock_stack_size();

  Value lock;
  BlockBegin* sync_handler;

  // Inline the locking of the receiver if the callee is synchronized
  if (callee->is_synchronized()) {
    lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
                               : state()->local_at(0);
    sync_handler = new BlockBegin(-1);
    inline_sync_entry(lock, sync_handler);

    // recompute the lock stack size
    scope()->compute_lock_stack_size();
  }

  BlockBegin* callee_start_block = block_at(0);
  if (callee_start_block != NULL) {
    assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
    Goto* goto_callee = new Goto(callee_start_block, false);
    goto_callee->set_state(state());
    // The state for this goto is in the scope of the callee, so use
    // the entry bci for the callee instead of the call site bci.
    append_with_bci(goto_callee, 0);
    _block->set_end(goto_callee);
    callee_start_block->merge(callee_state);

    _last = _block = callee_start_block;

    scope_data()->add_to_work_list(callee_start_block);
  }

  // Clear out bytecode stream
  scope_data()->set_stream(NULL);

  // Ready to resume parsing in callee (either in the same block we
  // were in before or in the callee's start block)
  iterate_all_blocks(callee_start_block == NULL);

  // If we bailed out during parsing, return immediately (this is bad news)
  if (bailed_out()) return false;

  // iterate_all_blocks theoretically traverses in random order; in
  // practice, we have only traversed the continuation if we are
  // inlining into a subroutine
  assert(continuation_existed ||
         !continuation()->is_set(BlockBegin::was_visited_flag),
         "continuation should not have been parsed yet if we created it");

  // If we bailed out during parsing, return immediately (this is bad news)
  CHECK_BAILOUT_(false);

  // At this point we are almost ready to return and resume parsing of
  // the caller back in the GraphBuilder. The only thing we want to do
  // first is an optimization: during parsing of the callee we
  // generated at least one Goto to the continuation block. If we
  // generated exactly one, and if the inlined method spanned exactly
  // one block (and we didn't have to Goto its entry), then we snip
  // off the Goto to the continuation, allowing control to fall
  // through back into the caller block and effectively performing
  // block merging. This allows load elimination and CSE to take place
  // across multiple callee scopes if they are relatively simple, and
  // is currently essential to making inlining profitable.
  if ( num_returns() == 1
    && block() == orig_block
    && block() == inline_cleanup_block()) {
    _last = inline_cleanup_return_prev();
    _state = inline_cleanup_state()->pop_scope();
  } else if (continuation_preds == cont->number_of_preds()) {
    // Inlining has made the instructions after the invoke in the
    // caller unreachable, so skip filling this block with
    // instructions!
    assert(cont == continuation(), "");
    assert(_last && _last->as_BlockEnd(), "");
    _skip_block = true;
  } else {
    // Resume parsing in continuation block unless it was already parsed.
    // Note that if we don't change _last here, iteration in
    // iterate_bytecodes_for_block will stop when we return.
    if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
      // add continuation to work list instead of parsing it immediately
      assert(_last && _last->as_BlockEnd(), "");
      scope_data()->parent()->add_to_work_list(continuation());
      _skip_block = true;
    }
  }

  // Fill the exception handler for synchronized methods with instructions
  if (callee->is_synchronized() && sync_handler->state() != NULL) {
    fill_sync_handler(lock, sync_handler);
  } else {
    pop_scope();
  }

  compilation()->notice_inlined_method(callee);

  return true;
}
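
// Rough outline of the inlining path above (descriptive comment only):
// null-check the receiver, push_scope() to build the callee's blocks,
// copy the actual arguments into the callee's locals, parse the callee
// with iterate_all_blocks(), and then either snip the single Goto to
// the continuation (so the caller block keeps falling through) or queue
// the continuation block for normal parsing.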


void GraphBuilder::inline_bailout(const char* msg) {
  assert(msg != NULL, "inline bailout msg must exist");
  _inline_bailout_msg = msg;
}


void GraphBuilder::clear_inline_bailout() {
  _inline_bailout_msg = NULL;
}


void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) {
  ScopeData* data = new ScopeData(NULL);
  data->set_scope(scope);
  data->set_bci2block(bci2block);
  _scope_data = data;
  _block = start;
}


void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
  IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
  scope()->add_callee(callee_scope);

  BlockListBuilder blb(compilation(), callee_scope, -1);
  CHECK_BAILOUT();

  if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
    // this scope can be inlined directly into the caller so remove
    // the block at bci 0.
    blb.bci2block()->at_put(0, NULL);
  }

  callee_scope->set_caller_state(state());
  set_state(state()->push_scope(callee_scope));

  ScopeData* data = new ScopeData(scope_data());
  data->set_scope(callee_scope);
  data->set_bci2block(blb.bci2block());
  data->set_continuation(continuation);
  _scope_data = data;
}


void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
  ScopeData* data = new ScopeData(scope_data());
  data->set_parsing_jsr();
  data->set_jsr_entry_bci(jsr_dest_bci);
  data->set_jsr_return_address_local(-1);
  // Must clone bci2block list as we will be mutating it in order to
  // properly clone all blocks in jsr region as well as exception
  // handlers containing rets
  BlockList* new_bci2block = new BlockList(bci2block()->length());
  new_bci2block->push_all(bci2block());
  data->set_bci2block(new_bci2block);
  data->set_scope(scope());
  data->setup_jsr_xhandlers();
  data->set_continuation(continuation());
  if (continuation() != NULL) {
    assert(continuation_state() != NULL, "");
    data->set_continuation_state(continuation_state()->copy());
  }
  data->set_jsr_continuation(jsr_continuation);
  _scope_data = data;
}


void GraphBuilder::pop_scope() {
  int number_of_locks = scope()->number_of_locks();
  _scope_data = scope_data()->parent();
  // accumulate minimum number of monitor slots to be reserved
  scope()->set_min_number_of_locks(number_of_locks);
}


void GraphBuilder::pop_scope_for_jsr() {
  _scope_data = scope_data()->parent();
}

bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) {
  if (InlineUnsafeOps) {
    Values* args = state()->pop_arguments(callee->arg_size());
    null_check(args->at(0));
    Instruction* offset = args->at(2);
#ifndef _LP64
    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif
    Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile));
    push(op->type(), op);
    compilation()->set_has_unsafe_access(true);
  }
  return InlineUnsafeOps;
}


bool GraphBuilder::append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile) {
  if (InlineUnsafeOps) {
    Values* args = state()->pop_arguments(callee->arg_size());
    null_check(args->at(0));
    Instruction* offset = args->at(2);
#ifndef _LP64
    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif
    Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, args->at(3), is_volatile));
    compilation()->set_has_unsafe_access(true);
    kill_all();
  }
  return InlineUnsafeOps;
}


bool GraphBuilder::append_unsafe_get_raw(ciMethod* callee, BasicType t) {
  if (InlineUnsafeOps) {
    Values* args = state()->pop_arguments(callee->arg_size());
    null_check(args->at(0));
    Instruction* op = append(new UnsafeGetRaw(t, args->at(1), false));
    push(op->type(), op);
    compilation()->set_has_unsafe_access(true);
  }
  return InlineUnsafeOps;
}


bool GraphBuilder::append_unsafe_put_raw(ciMethod* callee, BasicType t) {
  if (InlineUnsafeOps) {
    Values* args = state()->pop_arguments(callee->arg_size());
    null_check(args->at(0));
    Instruction* op = append(new UnsafePutRaw(t, args->at(1), args->at(2)));
    compilation()->set_has_unsafe_access(true);
  }
  return InlineUnsafeOps;
}


bool GraphBuilder::append_unsafe_prefetch(ciMethod* callee, bool is_static, bool is_store) {
  if (InlineUnsafeOps) {
    Values* args = state()->pop_arguments(callee->arg_size());
    int obj_arg_index = 1; // Assume non-static case
    if (is_static) {
      obj_arg_index = 0;
    } else {
      null_check(args->at(0));
    }
    Instruction* offset = args->at(obj_arg_index + 1);
#ifndef _LP64
    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif
    Instruction* op = is_store ? append(new UnsafePrefetchWrite(args->at(obj_arg_index), offset))
                               : append(new UnsafePrefetchRead (args->at(obj_arg_index), offset));
    compilation()->set_has_unsafe_access(true);
  }
  return InlineUnsafeOps;
}


void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
  ValueType* result_type = as_ValueType(callee->return_type());
  assert(result_type->is_int(), "int result");
  Values* args = state()->pop_arguments(callee->arg_size());

  // Pop off some args to specially handle, then push back
  Value newval = args->pop();
  Value cmpval = args->pop();
  Value offset = args->pop();
  Value src = args->pop();
  Value unsafe_obj = args->pop();

  // Separately handle the unsafe arg. It is not needed for code
  // generation, but must be null checked
  null_check(unsafe_obj);

#ifndef _LP64
  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif

  args->push(src);
  args->push(offset);
  args->push(cmpval);
  args->push(newval);

  // An unsafe CAS can alias with other field accesses, but we don't
  // know which ones, so mark the state as not preserved. This will
  // cause CSE to invalidate memory across it.
  bool preserves_state = false;
  Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, lock_stack(), preserves_state);
  append_split(result);
  push(result_type, result);
  compilation()->set_has_unsafe_access(true);
}
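
// Argument layout handled above (illustrative only): for a call like
// unsafe.compareAndSwapInt(obj, offset, expected, update) the popped
// values are, from the top of the argument list down, newval (update),
// cmpval (expected), offset, src (obj) and finally the Unsafe receiver,
// which is only null-checked and then dropped before the remaining
// four are pushed back for the Intrinsic node.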


#ifndef PRODUCT
void GraphBuilder::print_inline_result(ciMethod* callee, bool res) {
  const char sync_char      = callee->is_synchronized()        ? 's' : ' ';
  const char exception_char = callee->has_exception_handlers() ? '!' : ' ';
  const char monitors_char  = callee->has_monitor_bytecodes()  ? 'm' : ' ';
  tty->print(" %c%c%c ", sync_char, exception_char, monitors_char);
  for (int i = 0; i < scope()->level(); i++) tty->print(" ");
  if (res) {
    tty->print(" ");
  } else {
    tty->print("- ");
  }
  tty->print("@ %d ", bci());
  callee->print_short_name();
  tty->print(" (%d bytes)", callee->code_size());
  if (_inline_bailout_msg) {
    tty->print(" %s", _inline_bailout_msg);
  }
  tty->cr();

  if (res && CIPrintMethodCodes) {
    callee->print_codes();
  }
}


void GraphBuilder::print_stats() {
  vmap()->print();
}
#endif // PRODUCT


void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
  append(new ProfileCall(method(), bci(), recv, known_holder));
}


void GraphBuilder::profile_invocation(ciMethod* callee) {
  if (profile_calls()) {
    // increment the interpreter_invocation_count for the inlinee
    Value m = append(new Constant(new ObjectConstant(callee)));
    append(new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1));
  }
}


void GraphBuilder::profile_bci(int bci) {
  if (profile_branches()) {
    ciMethodData* md = method()->method_data();
    if (md == NULL) {
      BAILOUT("out of memory building methodDataOop");
    }
    ciProfileData* data = md->bci_to_data(bci);
    assert(data != NULL && data->is_JumpData(), "need JumpData for goto");
    Value mdo = append(new Constant(new ObjectConstant(md)));
    append(new ProfileCounter(mdo, md->byte_offset_of_slot(data, JumpData::taken_offset()), 1));
  }
}