169   int block_count() const { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }
170   BlockBegin* block_at(int idx) const { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list"); return _cached_blocks.at(idx); }
171
172   int num_virtual_regs() const { return _num_virtual_regs; }
173   // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
174 - int live_set_size() const { return round_to(_num_virtual_regs, BitsPerWord); }
174 + int live_set_size() const { return align_up(_num_virtual_regs, BitsPerWord); }
175   bool has_fpu_registers() const { return _has_fpu_registers; }
176   int num_loops() const { return ir()->num_loops(); }
177   bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }
178
179   // handling of fpu stack allocation (platform dependent, needed for debug information generation)
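Below the hunk, a minimal standalone sketch (not HotSpot code) of the rounding that line 174 performs: align_up serves the same purpose here as the round_to it replaces, rounding _num_virtual_regs up to the next multiple of BitsPerWord so that the live_in/live_out BitMaps cover a whole number of machine words and can be iterated word by word. The helper name align_up_sketch, the register count, and the 64-bit word size below are illustrative assumptions, not values taken from the source.

#include <cassert>
#include <cstdio>

// Illustrative only: rounds value up to the given power-of-two alignment,
// mimicking what an align-up style helper does for live_set_size().
static int align_up_sketch(int value, int alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int BitsPerWord = 64;     // assuming a 64-bit platform for the example
  int num_virtual_regs = 100;     // hypothetical virtual register count
  // Rounding up means the live_in/live_out bit sets span complete machine
  // words, so iteration can proceed one word at a time without a ragged tail.
  int live_set_size = align_up_sketch(num_virtual_regs, BitsPerWord);
  printf("%d virtual regs -> live set size %d bits (%d words)\n",
         num_virtual_regs, live_set_size, live_set_size / BitsPerWord);
  return 0;
}

With these assumed values the sketch prints "100 virtual regs -> live set size 128 bits (2 words)", i.e. 100 registers round up to two 64-bit words of bitmap.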