/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_C1_C1_LINEARSCAN_HPP
#define SHARE_VM_C1_C1_LINEARSCAN_HPP

#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

class DebugInfoCache;
class FpuStackAllocator;
class IRScopeDebugInfo;
class Interval;
class IntervalWalker;
class LIRGenerator;
class LinearScan;
class MoveResolver;
class Range;

typedef GrowableArray<Interval*> IntervalArray;
typedef GrowableArray<Interval*> IntervalList;
typedef GrowableArray<IntervalList*> IntervalsList;
typedef GrowableArray<ScopeValue*> ScopeValueArray;
typedef GrowableArray<LIR_OpList*> LIR_OpListStack;

enum IntervalUseKind {
  // priority of use kinds must be ascending
  noUse = 0,
  loopEndMarker = 1,
  shouldHaveRegister = 2,
  mustHaveRegister = 3,

  firstValidKind = 1,
  lastValidKind = 3
};

enum IntervalKind {
  fixedKind = 0,  // interval pre-colored by LIR_Generator
  anyKind   = 1,  // no register/memory allocated by LIR_Generator
  nofKinds,
  firstKind = fixedKind
};


// during linear scan an interval is in one of four states
enum IntervalState {
  unhandledState = 0,  // unhandled state (not processed yet)
  activeState   = 1,   // live and is in a physical register
  inactiveState = 2,   // in a lifetime hole and is in a physical register
  handledState  = 3,   // spilled or not live again
  invalidState = -1
};


enum IntervalSpillState {
  noDefinitionFound,  // starting state of calculation: no definition found yet
  oneDefinitionFound, // one definition has already been found.
                      // Note: two consecutive definitions are treated as one (e.g. consecutive move and add because of two-operand LIR form)
                      // the position of this definition is stored in _definition_pos
  oneMoveInserted,    // one spill move has already been inserted.
  storeAtDefinition,  // the interval should be stored immediately after its definition because otherwise
                      // there would be multiple redundant stores
  startInMemory,      // the interval starts in memory (e.g. method parameter), so a store is never necessary
  noOptimization      // the interval has more than one definition (e.g. resulting from phi moves), so stores to memory are not optimized
};
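
// Informal summary (derived from the comments above and from the assertion in
// Interval::set_spill_state() that the spill state never decreases): for intervals that
// are candidates for this optimization the state only advances, roughly
//   noDefinitionFound -> oneDefinitionFound -> oneMoveInserted -> storeAtDefinition
// while startInMemory and noOptimization mark intervals for which the store elimination
// is not applicable.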


#define for_each_interval_kind(kind) \
  for (IntervalKind kind = firstKind; kind < nofKinds; kind = (IntervalKind)(kind + 1))

#define for_each_visitor_mode(mode) \
  for (LIR_OpVisitState::OprMode mode = LIR_OpVisitState::firstMode; mode < LIR_OpVisitState::numModes; mode = (LIR_OpVisitState::OprMode)(mode + 1))
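
// Usage sketch for the iteration macros above (illustrative only; do_something() and
// do_something_else() are hypothetical placeholders). Both macros expand to plain
// counting loops over the corresponding enum values:
//
//   for_each_interval_kind(kind) {
//     do_something(kind);        // executed for fixedKind and anyKind
//   }
//   for_each_visitor_mode(mode) {
//     do_something_else(mode);   // executed for each mode from firstMode up to numModes (exclusive)
//   }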


class LinearScan : public CompilationResourceObj {
  // declare classes used by LinearScan as friends because they
  // need a wide variety of functions declared here
  //
  // Only the small interface to the rest of the compiler is public
  friend class Interval;
  friend class IntervalWalker;
  friend class LinearScanWalker;
  friend class FpuStackAllocator;
  friend class MoveResolver;
  friend class LinearScanStatistic;
  friend class LinearScanTimers;
  friend class RegisterVerifier;

 public:
  enum {
    any_reg = -1,
    nof_cpu_regs = pd_nof_cpu_regs_linearscan,
    nof_fpu_regs = pd_nof_fpu_regs_linearscan,
    nof_xmm_regs = pd_nof_xmm_regs_linearscan,
    nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
  };

 private:
  Compilation*     _compilation;
  IR*              _ir;
  LIRGenerator*    _gen;
  FrameMap*        _frame_map;

  BlockList        _cached_blocks;     // cached list with all blocks in linear-scan order (only correct if the original list is unchanged)
  int              _num_virtual_regs;  // number of virtual registers (without new registers introduced because of splitting intervals)
  bool             _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)
  int              _num_calls;         // total number of calls in this method
  int              _max_spills;        // number of stack slots used for intervals allocated to memory
  int              _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value

  IntervalList     _intervals;         // mapping from register number to interval
  IntervalList*    _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
  IntervalArray*   _sorted_intervals;  // intervals sorted by Interval::from()
  bool             _needs_full_resort; // set to true if an Interval::from() is changed and _sorted_intervals must be resorted

  LIR_OpArray      _lir_ops;           // mapping from LIR_Op id to LIR_Op node
  BlockBeginArray  _block_of_op;       // mapping from LIR_Op id to the BlockBegin containing this instruction
  ResourceBitMap   _has_info;          // bit set for each LIR_Op id that has a CodeEmitInfo
  ResourceBitMap   _has_call;          // bit set for each LIR_Op id that destroys all caller save registers
  BitMap2D         _interval_in_loop;  // bit set for each virtual register that is contained in each loop

  // cached debug info to prevent multiple creation of same object
  // TODO: cached scope values for registers could be static
  ScopeValueArray  _scope_value_cache;

  static ConstantOopWriteValue* _oop_null_scope_value;
  static ConstantIntValue*      _int_m1_scope_value;
  static ConstantIntValue*      _int_0_scope_value;
  static ConstantIntValue*      _int_1_scope_value;
  static ConstantIntValue*      _int_2_scope_value;

  // accessors
  IR*           ir() const          { return _ir; }
  Compilation*  compilation() const { return _compilation; }
  LIRGenerator* gen() const         { return _gen; }
  FrameMap*     frame_map() const   { return _frame_map; }

  // unified bailout support
  void          bailout(const char* msg) const { compilation()->bailout(msg); }
  bool          bailed_out() const             { return compilation()->bailed_out(); }

  // access to block list (sorted in linear scan order)
  int           block_count() const     { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }
  BlockBegin*   block_at(int idx) const { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list"); return _cached_blocks.at(idx); }

  int           num_virtual_regs() const  { return _num_virtual_regs; }
  // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
  int           live_set_size() const     { return align_up(_num_virtual_regs, BitsPerWord); }
  bool          has_fpu_registers() const { return _has_fpu_registers; }
  int           num_loops() const         { return ir()->num_loops(); }
  bool          is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }

  // handling of fpu stack allocation (platform dependent, needed for debug information generation)
#ifdef X86
  FpuStackAllocator* _fpu_stack_allocator;
  bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }
#else
  bool use_fpu_stack_allocation() const { return false; }
#endif


  // access to interval list
  int           interval_count() const         { return _intervals.length(); }
  Interval*     interval_at(int reg_num) const { return _intervals.at(reg_num); }

  IntervalList* new_intervals_from_allocation() const { return _new_intervals_from_allocation; }

  // access to LIR_Ops and Blocks indexed by op_id
  int          max_lir_op_id() const                { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
  LIR_Op*      lir_op_with_id(int op_id) const      { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
  BlockBegin*  block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }

  bool is_block_begin(int op_id)                    { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }
  bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }

  bool has_call(int op_id)                          { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
  bool has_info(int op_id)                          { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }


  // functions for converting LIR-Operands to register numbers
  static bool is_valid_reg_num(int reg_num) { return reg_num >= 0; }
  static int  reg_num(LIR_Opr opr);
  static int  reg_numHi(LIR_Opr opr);

  // functions for classification of intervals
  static bool is_precolored_interval(const Interval* i);
  static bool is_virtual_interval(const Interval* i);

  static bool is_precolored_cpu_interval(const Interval* i);
  static bool is_virtual_cpu_interval(const Interval* i);
  static bool is_precolored_fpu_interval(const Interval* i);
  static bool is_virtual_fpu_interval(const Interval* i);

  static bool is_in_fpu_register(const Interval* i);
  static bool is_oop_interval(const Interval* i);


  // General helper functions
  int       allocate_spill_slot(bool double_word);
  void      assign_spill_slot(Interval* it);
  void      propagate_spill_slots();

  Interval* create_interval(int reg_num);
  void      append_interval(Interval* it);
  void      copy_register_flags(Interval* from, Interval* to);

  // platform dependent functions
  static bool is_processed_reg_num(int reg_num);
  static int  num_physical_regs(BasicType type);
  static bool requires_adjacent_regs(BasicType type);
  static bool is_caller_save(int assigned_reg);

  // spill move optimization: eliminate moves from register to stack if
  // stack slot is known to be correct
  void change_spill_definition_pos(Interval* interval, int def_pos);
  void change_spill_state(Interval* interval, int spill_pos);
  static bool must_store_at_definition(const Interval* i);
  void eliminate_spill_moves();

  // Phase 1: number all instructions in all blocks
  void number_instructions();

  // Phase 2: compute local live sets separately for each block
  // (sets live_gen and live_kill for each block)
  //
  // helper methods used by compute_local_live_sets()
  void set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill);

  void compute_local_live_sets();

  // Phase 3: perform a backward dataflow analysis to compute global live sets
  // (sets live_in and live_out for each block)
  void compute_global_live_sets();


  // Phase 4: build intervals
  // (fills the list _intervals)
  //
  // helper methods used by build_intervals()
  void add_use (Value value, int from, int to, IntervalUseKind use_kind);

  void add_def (LIR_Opr opr, int def_pos, IntervalUseKind use_kind);
  void add_use (LIR_Opr opr, int from, int to, IntervalUseKind use_kind);
  void add_temp(LIR_Opr opr, int temp_pos, IntervalUseKind use_kind);

  void add_def (int reg_num, int def_pos, IntervalUseKind use_kind, BasicType type);
  void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);
  void add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type);

  // Add platform dependent kills for particular LIR ops. Can be used
  // to add platform dependent behaviour for some operations.
  void pd_add_temps(LIR_Op* op);

  IntervalUseKind use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr);
  IntervalUseKind use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr);
  void handle_method_arguments(LIR_Op* op);
  void handle_doubleword_moves(LIR_Op* op);
  void add_register_hints(LIR_Op* op);

  void build_intervals();


  // Phase 5: actual register allocation
  // (Uses LinearScanWalker)
  //
  // helper functions for building a sorted list of intervals
  NOT_PRODUCT(bool is_sorted(IntervalArray* intervals);)
  static int interval_cmp(Interval** a, Interval** b);
  void add_to_list(Interval** first, Interval** prev, Interval* interval);
  void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));

  void sort_intervals_before_allocation();
  void sort_intervals_after_allocation();
  void allocate_registers();


  // Phase 6: resolve data flow
  // (insert moves at edges between blocks if intervals have been split)
  //
  // helper functions for resolve_data_flow()
  Interval* split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode);
  Interval* interval_at_block_begin(BlockBegin* block, int reg_num);
  Interval* interval_at_block_end(BlockBegin* block, int reg_num);
  Interval* interval_at_op_id(int reg_num, int op_id);
  void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
  void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
  void resolve_data_flow();

  void resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver);
  void resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver);
  void resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver);
  void resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver);
  void resolve_exception_handlers();

  // Phase 7: assign register numbers back to LIR
  // (includes computation of debug information and oop maps)
  //
  // helper functions for assign_reg_num()
  VMReg vm_reg_for_interval(Interval* interval);
  VMReg vm_reg_for_operand(LIR_Opr opr);

  static LIR_Opr operand_for_interval(Interval* interval);
  static LIR_Opr calc_operand_for_interval(const Interval* interval);
  LIR_Opr        canonical_spill_opr(Interval* interval);

  LIR_Opr color_lir_opr(LIR_Opr opr, int id, LIR_OpVisitState::OprMode);

  // methods used for oop map computation
  IntervalWalker* init_compute_oop_maps();
  OopMap*         compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site);
  void            compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op);

  // methods used for debug information computation
  void init_compute_debug_info();

  MonitorValue*  location_for_monitor_index(int monitor_index);
  LocationValue* location_for_name(int name, Location::Type loc_type);
  void set_oop(OopMap* map, VMReg name) {
    if (map->legal_vm_reg_name(name)) {
      map->set_oop(name);
    } else {
      bailout("illegal oopMap register name");
    }
  }

  int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values);

  IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state);
  void compute_debug_info(CodeEmitInfo* info, int op_id);

  void assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw);
  void assign_reg_num();


  // Phase 8: fpu stack allocation
  // (Used only on x86 when fpu operands are present)
  void allocate_fpu_stack();


  // helper functions for printing state
#ifndef PRODUCT
  static void print_bitmap(BitMap& bitmap);
  void        print_intervals(const char* label);
  void        print_lir(int level, const char* label, bool hir_valid = true);
#endif

#ifdef ASSERT
  // verification functions for allocation
  // (check that all intervals have a correct register and that no registers are overwritten)
  void verify();
  void verify_intervals();
  void verify_no_oops_in_fixed_intervals();
  void verify_constants();
  void verify_registers();
#endif

 public:
  // creation
  LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map);

  // main entry function: perform linear scan register allocation
  void do_linear_scan();

  // accessors used by Compilation
  int  max_spills() const { return _max_spills; }
  int  num_calls() const  { assert(_num_calls >= 0, "not set"); return _num_calls; }

  // entry functions for printing
#ifndef PRODUCT
  static void print_statistics();
  static void print_timers(double total);
#endif
};
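
// Usage sketch (illustrative only, based on the public interface above): the compiler is
// expected to create one allocator per compiled method and run it once.
//
//   LinearScan* allocator = new LinearScan(ir, gen, frame_map);
//   allocator->do_linear_scan();                // runs phases 1-8 described above
//   int spill_slots = allocator->max_spills();  // stack slots used by spilled intervals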


// Helper class for ordering moves that are inserted at the same position in the LIR.
// When moves between registers are inserted, it is important that the moves are
// ordered such that no register is overwritten. So moves from register to stack
// are processed before moves from stack to register. When moves have circular
// dependencies, a temporary stack slot is used to break the cycle.
// The same logic is used in the LinearScanWalker and in LinearScan during resolve_data_flow,
// and is therefore factored out into a separate class.
class MoveResolver: public StackObj {
 private:
  LinearScan*         _allocator;

  LIR_List*           _insert_list;
  int                 _insert_idx;
  LIR_InsertionBuffer _insertion_buffer; // buffer where moves are inserted

  IntervalList        _mapping_from;
  LIR_OprList         _mapping_from_opr;
  IntervalList        _mapping_to;
  bool                _multiple_reads_allowed;
  int                 _register_blocked[LinearScan::nof_regs];

  int  register_blocked(int reg)                    { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
  void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }

  void block_registers(Interval* it);
  void unblock_registers(Interval* it);
  bool save_to_process_move(Interval* from, Interval* to);

  void create_insertion_buffer(LIR_List* list);
  void append_insertion_buffer();
  void insert_move(Interval* from_interval, Interval* to_interval);
  void insert_move(LIR_Opr from_opr, Interval* to_interval);

  DEBUG_ONLY(void verify_before_resolve();)
  void resolve_mappings();
 public:
  MoveResolver(LinearScan* allocator);

  DEBUG_ONLY(void check_empty();)
  void set_multiple_reads_allowed() { _multiple_reads_allowed = true; }
  void set_insert_position(LIR_List* insert_list, int insert_idx);
  void move_insert_position(LIR_List* insert_list, int insert_idx);
  void add_mapping(Interval* from, Interval* to);
  void add_mapping(LIR_Opr from, Interval* to);
  void resolve_and_append_moves();

  LinearScan* allocator() { return _allocator; }
  bool has_mappings() { return _mapping_from.length() > 0; }
};
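
// Usage sketch (illustrative only): a client such as resolve_data_flow() collects all
// interval mappings for one insertion point and then lets the resolver emit the moves
// in a safe order, breaking cycles through a stack slot where necessary:
//
//   MoveResolver move_resolver(allocator);
//   move_resolver.set_insert_position(insert_list, insert_idx);
//   move_resolver.add_mapping(from_interval, to_interval);   // once per required move
//   move_resolver.resolve_and_append_moves();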


class Range : public CompilationResourceObj {
  friend class Interval;

 private:
  static Range* _end;   // sentinel (from == to == max_jint)

  int    _from;         // from (inclusive)
  int    _to;           // to (exclusive)
  Range* _next;         // linear list of Ranges

  // used only by class Interval, so hide them
  bool intersects(Range* r) const    { return intersects_at(r) != -1; }
  int  intersects_at(Range* r) const;

 public:
  Range(int from, int to, Range* next);

  static void initialize(Arena* arena);
  static Range* end() { return _end; }

  int    from() const { return _from; }
  int    to() const   { return _to; }
  Range* next() const { return _next; }
  void set_from(int from)    { _from = from; }
  void set_to(int to)        { _to = to; }
  void set_next(Range* next) { _next = next; }

  // for testing
  void print(outputStream* out = tty) const PRODUCT_RETURN;
};
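
// Example (illustrative): with _from inclusive and _to exclusive, Range(4, 12, Range::end())
// covers the LIR positions 4 through 11 and therefore does not intersect a range that
// starts at position 12.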


// Interval is an ordered list of disjoint ranges.

// For pre-colored double word LIR_Oprs, one interval is created for
// the low word register and one is created for the hi word register.
// On Intel, for FPU double registers only one interval is created. At
// all times assigned_reg contains the reg. number of the physical
// register.

// For LIR_Oprs in virtual registers, a single interval can represent
// single and double word values. When a physical register is
// assigned to the interval, assigned_reg contains the
// phys. reg. number and, for double word values, assigned_regHi the
// phys. reg. number of the hi word if there is any. For spilled
// intervals assigned_reg contains the stack index. assigned_regHi is
// always -1.

class Interval : public CompilationResourceObj {
 private:
  static Interval*   _end;                  // sentinel (interval with only range Range::end())

  int                _reg_num;
  BasicType          _type;                 // valid only for virtual registers
  Range*             _first;                // sorted list of Ranges
  intStack           _use_pos_and_kinds;    // sorted list of use-positions and their corresponding use-kinds

  Range*             _current;              // interval iteration: the current Range
  Interval*          _next;                 // interval iteration: sorted list of Intervals (ends with sentinel)
  IntervalState      _state;                // interval iteration: the set to which this interval currently belongs


  int                _assigned_reg;
  int                _assigned_regHi;

  int                _cached_to;            // cached value: to of last range (-1: not cached)
  LIR_Opr            _cached_opr;
  VMReg              _cached_vm_reg;

  Interval*          _split_parent;         // the original interval from which this interval is derived
  IntervalList       _split_children;       // list of all intervals that are split off from this interval (only available for split parents)
  Interval*          _current_split_child;  // the current split child that has been active or inactive last (always stored in split parents)

  int                _canonical_spill_slot; // the stack slot where all split parts of this interval are spilled to (always stored in split parents)
  bool               _insert_move_when_activated; // true if a move is inserted between _current_split_child and this interval when the interval gets active the first time
  IntervalSpillState _spill_state;          // for spill move optimization
  int                _spill_definition_pos; // position where the interval is defined (if defined only once)
  Interval*          _register_hint;        // this interval should be in the same register as the hint interval

  int       calc_to();
  Interval* new_split_child();
 public:
  Interval(int reg_num);

  static void initialize(Arena* arena);
  static Interval* end() { return _end; }

  // accessors
  int       reg_num() const          { return _reg_num; }
  void      set_reg_num(int r)       { assert(_reg_num == -1, "cannot change reg_num"); _reg_num = r; }
  BasicType type() const             { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }
  void      set_type(BasicType type) { assert(_reg_num < LIR_OprDesc::vreg_base || _type == T_ILLEGAL || _type == type, "overwriting existing type"); _type = type; }

  Range*    first() const            { return _first; }
  int       from() const             { return _first->from(); }
  int       to()                     { if (_cached_to == -1) _cached_to = calc_to(); assert(_cached_to == calc_to(), "invalid cached value"); return _cached_to; }
  int       num_use_positions() const { return _use_pos_and_kinds.length() / 2; }

  Interval*  next() const            { return _next; }
  Interval** next_addr()             { return &_next; }
  void       set_next(Interval* next) { _next = next; }

  int       assigned_reg() const     { return _assigned_reg; }
  int       assigned_regHi() const   { return _assigned_regHi; }
  void      assign_reg(int reg)             { _assigned_reg = reg; _assigned_regHi = LinearScan::any_reg; }
  void      assign_reg(int reg, int regHi)  { _assigned_reg = reg; _assigned_regHi = regHi; }

  Interval* register_hint(bool search_split_child = true) const; // calculation needed
  void      set_register_hint(Interval* i)  { _register_hint = i; }

  int       state() const            { return _state; }
  void      set_state(IntervalState s) { _state = s; }

  // access to split parent and split children
  bool      is_split_parent() const  { return _split_parent == this; }
  bool      is_split_child() const   { return _split_parent != this; }
  Interval* split_parent() const     { assert(_split_parent->is_split_parent(), "must be"); return _split_parent; }
  Interval* split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode);
  Interval* split_child_before_op_id(int op_id);
  bool      split_child_covers(int op_id, LIR_OpVisitState::OprMode mode);
  DEBUG_ONLY(void check_split_children();)

  // information stored in split parent, but available for all children
  int       canonical_spill_slot() const       { return split_parent()->_canonical_spill_slot; }
  void      set_canonical_spill_slot(int slot) { assert(split_parent()->_canonical_spill_slot == -1, "overwriting existing value"); split_parent()->_canonical_spill_slot = slot; }
  Interval* current_split_child() const        { return split_parent()->_current_split_child; }
  void      make_current_split_child()         { split_parent()->_current_split_child = this; }

  bool      insert_move_when_activated() const { return _insert_move_when_activated; }
  void      set_insert_move_when_activated(bool b) { _insert_move_when_activated = b; }

  // for spill optimization
  IntervalSpillState spill_state() const       { return split_parent()->_spill_state; }
  int       spill_definition_pos() const       { return split_parent()->_spill_definition_pos; }
  void      set_spill_state(IntervalSpillState state) { assert(state >= spill_state(), "state cannot decrease"); split_parent()->_spill_state = state; }
  void      set_spill_definition_pos(int pos)  { assert(spill_definition_pos() == -1, "cannot set the position twice"); split_parent()->_spill_definition_pos = pos; }
  // returns true if this interval has a shadow copy on the stack that is always correct
  bool      always_in_memory() const           { return split_parent()->_spill_state == storeAtDefinition || split_parent()->_spill_state == startInMemory; }

  // caching of values that take time to compute and are used multiple times
  LIR_Opr   cached_opr() const                 { return _cached_opr; }
  VMReg     cached_vm_reg() const              { return _cached_vm_reg; }
  void      set_cached_opr(LIR_Opr opr)        { _cached_opr = opr; }
  void      set_cached_vm_reg(VMReg reg)       { _cached_vm_reg = reg; }

  // access to use positions
  int first_usage(IntervalUseKind min_use_kind) const;           // id of the first operation requiring this interval in a register
  int next_usage(IntervalUseKind min_use_kind, int from) const;  // id of next usage seen from the given position
  int next_usage_exact(IntervalUseKind exact_use_kind, int from) const;
  int previous_usage(IntervalUseKind min_use_kind, int from) const;

  // manipulating intervals
  void      add_use_pos(int pos, IntervalUseKind use_kind);
  void      add_range(int from, int to);
  Interval* split(int split_pos);
  Interval* split_from_start(int split_pos);
  void      remove_first_use_pos() { _use_pos_and_kinds.trunc_to(_use_pos_and_kinds.length() - 2); }

  // test intersection
  bool      covers(int op_id, LIR_OpVisitState::OprMode mode) const;
  bool      has_hole_between(int from, int to);
  bool      intersects(Interval* i) const      { return _first->intersects(i->_first); }
  int       intersects_at(Interval* i) const   { return _first->intersects_at(i->_first); }

  // range iteration
  void      rewind_range()                     { _current = _first; }
  void      next_range()                       { assert(this != _end, "not allowed on sentinel"); _current = _current->next(); }
  int       current_from() const               { return _current->from(); }
  int       current_to() const                 { return _current->to(); }
  bool      current_at_end() const             { return _current == Range::end(); }
  bool      current_intersects(Interval* it)    { return _current->intersects(it->_current); }
  int       current_intersects_at(Interval* it) { return _current->intersects_at(it->_current); }

  // printing
  void print(outputStream* out = tty) const PRODUCT_RETURN;
};
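
// Illustrative sketch (hypothetical positions, using the interface above): build_intervals()
// creates one Interval per register number and attaches ranges and use positions; the
// allocator may later split it when no register is available over the whole lifetime:
//
//   Interval* it = new Interval(reg_num);
//   it->add_range(def_pos, use_pos);             // live from definition to use
//   it->add_use_pos(use_pos, mustHaveRegister);  // this use needs a physical register
//   Interval* tail = it->split(split_pos);       // tail becomes a split child of it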


class IntervalWalker : public CompilationResourceObj {
 protected:
  Compilation*     _compilation;
  LinearScan*      _allocator;

  Interval*        _unhandled_first[nofKinds];  // sorted list of intervals, not live before the current position
  Interval*        _active_first   [nofKinds];  // sorted list of intervals, live at the current position
  Interval*        _inactive_first [nofKinds];  // sorted list of intervals, in a lifetime hole at the current position

  Interval*        _current;                    // the current interval coming from the unhandled list
  int              _current_position;           // the current position (intercept point through the intervals)
  IntervalKind     _current_kind;               // and whether it is fixed_kind or any_kind


  Compilation*     compilation() const { return _compilation; }
  LinearScan*      allocator() const   { return _allocator; }

  // unified bailout support
  void             bailout(const char* msg) const { compilation()->bailout(msg); }
  bool             bailed_out() const             { return compilation()->bailed_out(); }

  void check_bounds(IntervalKind kind) { assert(kind >= fixedKind && kind <= anyKind, "invalid interval_kind"); }

  Interval** unhandled_first_addr(IntervalKind kind) { check_bounds(kind); return &_unhandled_first[kind]; }
  Interval** active_first_addr(IntervalKind kind)    { check_bounds(kind); return &_active_first[kind]; }
  Interval** inactive_first_addr(IntervalKind kind)  { check_bounds(kind); return &_inactive_first[kind]; }

  void append_unsorted(Interval** first, Interval* interval);
  void append_sorted(Interval** first, Interval* interval);
  void append_to_unhandled(Interval** list, Interval* interval);

  bool remove_from_list(Interval** list, Interval* i);
  void remove_from_list(Interval* i);

  void next_interval();
  Interval*    current() const      { return _current; }
  IntervalKind current_kind() const { return _current_kind; }

  void walk_to(IntervalState state, int from);

  // activate_current() is called when an unhandled interval becomes active (in current(), current_kind()).
  // Return false if current() should not be moved to the active interval list.
  // It is safe to append current to any interval list but the unhandled list.
  virtual bool activate_current() { return true; }

  // interval_moved() is called whenever an interval moves from one interval list to another.
  // In the implementation of this method it is prohibited to move the interval to any list.
  virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);

 public:
  IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  Interval* unhandled_first(IntervalKind kind) { check_bounds(kind); return _unhandled_first[kind]; }
  Interval* active_first(IntervalKind kind)    { check_bounds(kind); return _active_first[kind]; }
  Interval* inactive_first(IntervalKind kind)  { check_bounds(kind); return _inactive_first[kind]; }

  // active contains the intervals that are live after the lir_op
  void walk_to(int lir_op_id);
  // active contains the intervals that are live before the lir_op
  void walk_before(int lir_op_id) { walk_to(lir_op_id-1); }
  // walk through all intervals
  void walk()                     { walk_to(max_jint); }

  int current_position() { return _current_position; }
};
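
// Usage sketch (illustrative only): a walker is positioned at an instruction id and the
// per-kind lists are inspected afterwards; the sorted unhandled lists come from LinearScan:
//
//   IntervalWalker* iw = new IntervalWalker(allocator, unhandled_fixed, unhandled_any);
//   iw->walk_before(op_id);   // active now holds the intervals live before op_id
//   for (Interval* it = iw->active_first(fixedKind); it != Interval::end(); it = it->next()) {
//     // inspect it, e.g. when computing the oop map for op_id
//   }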


// The actual linear scan register allocator
class LinearScanWalker : public IntervalWalker {
  enum {
    any_reg = LinearScan::any_reg
  };

 private:
  int              _first_reg;       // the reg. number of the first phys. register
  int              _last_reg;        // the reg. number of the last phys. register
  int              _num_phys_regs;   // required by the current interval
  bool             _adjacent_regs;   // whether the lo/hi words of phys. regs must be adjacent

  int              _use_pos[LinearScan::nof_regs];
  int              _block_pos[LinearScan::nof_regs];
  IntervalList*    _spill_intervals[LinearScan::nof_regs];

  MoveResolver     _move_resolver;   // for ordering spill moves

  // accessors mapped to same functions in class LinearScan
  int         block_count() const                  { return allocator()->block_count(); }
  BlockBegin* block_at(int idx) const              { return allocator()->block_at(idx); }
  BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }

  void init_use_lists(bool only_process_use_pos);
  void exclude_from_use(int reg);
  void exclude_from_use(Interval* i);
  void set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos);
  void set_use_pos(Interval* i, int use_pos, bool only_process_use_pos);
  void set_block_pos(int reg, Interval* i, int block_pos);
  void set_block_pos(Interval* i, int block_pos);

  void free_exclude_active_fixed();
  void free_exclude_active_any();
  void free_collect_inactive_fixed(Interval* cur);
  void free_collect_inactive_any(Interval* cur);
  void free_collect_unhandled(IntervalKind kind, Interval* cur);
  void spill_exclude_active_fixed();
  void spill_block_unhandled_fixed(Interval* cur);
  void spill_block_inactive_fixed(Interval* cur);
  void spill_collect_active_any();
  void spill_collect_inactive_any(Interval* cur);

  void insert_move(int op_id, Interval* src_it, Interval* dst_it);
  int  find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos);
  int  find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization);
  void split_before_usage(Interval* it, int min_split_pos, int max_split_pos);
  void split_for_spilling(Interval* it);
  void split_stack_interval(Interval* it);
  void split_when_partial_register_available(Interval* it, int register_available_until);
  void split_and_spill_interval(Interval* it);

  int  find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  bool alloc_free_reg(Interval* cur);

  int  find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  void split_and_spill_intersecting_intervals(int reg, int regHi);
  void alloc_locked_reg(Interval* cur);

  bool no_allocation_possible(Interval* cur);
  void update_phys_reg_range(bool requires_cpu_register);
  void init_vars_for_alloc(Interval* cur);
  bool pd_init_regs_for_alloc(Interval* cur);

  void combine_spilled_intervals(Interval* cur);
  bool is_move(LIR_Op* op, Interval* from, Interval* to);

  bool activate_current();

 public:
  LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  // must be called when all intervals are allocated
  void finish_allocation() { _move_resolver.resolve_and_append_moves(); }
};



/*
   When a block has more than one predecessor, and all predecessors end with
   the same sequence of move-instructions, then these moves can be placed once
   at the beginning of the block instead of multiple times in the predecessors.

   Similarly, when a block has more than one successor, then equal sequences of
   moves at the beginning of the successors can be placed once at the end of
   the block. But because the moves must be inserted before all branch
   instructions, this works only when there is exactly one conditional branch
   at the end of the block (because the moves must be inserted before all
   branches, but after all compares).

   This optimization affects all kinds of moves (reg->reg, reg->stack and
   stack->reg). Because this optimization works best when a block contains only
   a few moves, it has a huge impact on the number of blocks that are totally
   empty.
*/
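// Illustrative example (hypothetical operands): if both predecessors of a block B3 end
// with the same move, that move can be emitted once at the beginning of B3 instead:
//
//   before:  B1: ... move R1 -> stack:4; branch B3     after:  B1: ... branch B3
//            B2: ... move R1 -> stack:4; branch B3             B2: ... branch B3
//            B3: ...                                           B3: move R1 -> stack:4; ...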
class EdgeMoveOptimizer : public StackObj {
 private:
  // the class maintains a list with all lir-instruction-lists of the
  // successors (predecessors) and the current index into the lir-lists
  LIR_OpListStack _edge_instructions;
  intStack        _edge_instructions_idx;

  void init_instructions();
  void append_instructions(LIR_OpList* instructions, int instructions_idx);
  LIR_Op* instruction_at(int edge);
  void remove_cur_instruction(int edge, bool decrement_index);

  bool operations_different(LIR_Op* op1, LIR_Op* op2);

  void optimize_moves_at_block_end(BlockBegin* cur);
  void optimize_moves_at_block_begin(BlockBegin* cur);

  EdgeMoveOptimizer();

 public:
  static void optimize(BlockList* code);
};



class ControlFlowOptimizer : public StackObj {
 private:
  BlockList _original_preds;

  enum {
    ShortLoopSize = 5
  };
  void reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx);
  void reorder_short_loops(BlockList* code);

  bool can_delete_block(BlockBegin* cur);
  void substitute_branch_target(BlockBegin* cur, BlockBegin* target_from, BlockBegin* target_to);
  void delete_empty_blocks(BlockList* code);

  void delete_unnecessary_jumps(BlockList* code);
  void delete_jumps_to_return(BlockList* code);

  DEBUG_ONLY(void verify(BlockList* code);)

  ControlFlowOptimizer();
 public:
  static void optimize(BlockList* code);
};


#ifndef PRODUCT

// Helper class for collecting statistics of LinearScan
class LinearScanStatistic : public StackObj {
 public:
  enum Counter {
    // general counters
    counter_method,
    counter_fpu_method,
    counter_loop_method,
    counter_exception_method,
    counter_loop,
    counter_block,
    counter_loop_block,
    counter_exception_block,
    counter_interval,
    counter_fixed_interval,
    counter_range,
    counter_fixed_range,
    counter_use_pos,
    counter_fixed_use_pos,
    counter_spill_slots,
    blank_line_1,

    // counter for classes of lir instructions
    counter_instruction,
    counter_label,
    counter_entry,
    counter_return,
    counter_call,
    counter_move,
    counter_cmp,
    counter_cond_branch,
    counter_uncond_branch,
    counter_stub_branch,
    counter_alu,
    counter_alloc,
    counter_sync,
    counter_throw,
    counter_unwind,
    counter_typecheck,
    counter_fpu_stack,
    counter_misc_inst,
    counter_other_inst,
    blank_line_2,

    // counter for different types of moves
    counter_move_total,
    counter_move_reg_reg,
    counter_move_reg_stack,
    counter_move_stack_reg,
    counter_move_stack_stack,
    counter_move_reg_mem,
    counter_move_mem_reg,
    counter_move_const_any,

    number_of_counters,
    invalid_counter = -1
  };

 private:
  int _counters_sum[number_of_counters];
  int _counters_max[number_of_counters];

  void inc_counter(Counter idx, int value = 1) { _counters_sum[idx] += value; }

  const char* counter_name(int counter_idx);
  Counter     base_counter(int counter_idx);

  void sum_up(LinearScanStatistic &method_statistic);
  void collect(LinearScan* allocator);

 public:
  LinearScanStatistic();
  void print(const char* title);
  static void compute(LinearScan* allocator, LinearScanStatistic &global_statistic);
};


// Helper class for collecting compilation time of LinearScan
class LinearScanTimers : public StackObj {
 public:
  enum Timer {
    timer_do_nothing,
    timer_number_instructions,
    timer_compute_local_live_sets,
    timer_compute_global_live_sets,
    timer_build_intervals,
    timer_sort_intervals_before,
    timer_allocate_registers,
    timer_resolve_data_flow,
    timer_sort_intervals_after,
    timer_eliminate_spill_moves,
    timer_assign_reg_num,
    timer_allocate_fpu_stack,
    timer_optimize_lir,

    number_of_timers
  };

 private:
  elapsedTimer _timers[number_of_timers];
  const char*  timer_name(int idx);

 public:
  LinearScanTimers();

  void begin_method();                     // called for each method when register allocation starts
  void end_method(LinearScan* allocator);  // called for each method when register allocation has completed
  void print(double total_time);           // called before termination of VM to print global summary

  elapsedTimer* timer(int idx) { return &(_timers[idx]); }
};


#endif // ifndef PRODUCT

// Pick up platform-dependent implementation details
#include CPU_HEADER(c1_LinearScan)

#endif // SHARE_VM_C1_C1_LINEARSCAN_HPP