diff -r dbeb68f8a0ee -r 717c3345024f hotspot/src/share/vm/c1/c1_LinearScan.cpp
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Tue Aug 26 15:49:40 2008 -0700
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Wed Aug 27 00:21:55 2008 -0700
@@ -80,7 +80,7 @@
 , _scope_value_cache(0) // initialized later with correct length
 , _interval_in_loop(0, 0) // initialized later with correct length
 , _cached_blocks(*ir->linear_scan_order())
-#ifdef IA32
+#ifdef X86
 , _fpu_stack_allocator(NULL)
 #endif
 {
@@ -116,7 +116,7 @@
     return opr->cpu_regnr();
   } else if (opr->is_double_cpu()) {
     return opr->cpu_regnrLo();
-#ifdef IA32
+#ifdef X86
   } else if (opr->is_single_xmm()) {
     return opr->fpu_regnr() + pd_first_xmm_reg;
   } else if (opr->is_double_xmm()) {
@@ -128,6 +128,7 @@
     return opr->fpu_regnrLo() + pd_first_fpu_reg;
   } else {
     ShouldNotReachHere();
+    return -1;
   }
 }
 
@@ -140,7 +141,7 @@
     return -1;
   } else if (opr->is_double_cpu()) {
     return opr->cpu_regnrHi();
-#ifdef IA32
+#ifdef X86
   } else if (opr->is_single_xmm()) {
     return -1;
   } else if (opr->is_double_xmm()) {
@@ -152,6 +153,7 @@
     return opr->fpu_regnrHi() + pd_first_fpu_reg;
   } else {
     ShouldNotReachHere();
+    return -1;
   }
 }
 
@@ -1063,7 +1065,7 @@
   }
 
 
-#ifdef IA32
+#ifdef X86
   if (op->code() == lir_cmove) {
     // conditional moves can handle stack operands
     assert(op->result_opr()->is_register(), "result must always be in a register");
@@ -1128,7 +1130,7 @@
       }
     }
   }
-#endif // IA32
+#endif // X86
   // all other operands require a register
   return mustHaveRegister;
 }
@@ -1261,7 +1263,7 @@
   // virtual fpu operands. Otherwise no allocation for fpu registers is
   // performed and so the temp ranges would be useless
   if (has_fpu_registers()) {
-#ifdef IA32
+#ifdef X86
     if (UseSSE < 2) {
 #endif
       for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
@@ -1270,7 +1272,7 @@
         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
       }
-#ifdef IA32
+#ifdef X86
     }
     if (UseSSE > 0) {
       for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
@@ -1299,8 +1301,8 @@
 
   // Update intervals for registers live at the end of this block;
   BitMap live = block->live_out();
-  int size = live.size();
-  for (int number = live.get_next_one_offset(0, size); number < size; number = live.get_next_one_offset(number + 1, size)) {
+  int size = (int)live.size();
+  for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
     assert(live.at(number), "should not stop here otherwise");
     assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
     TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));
@@ -1654,7 +1656,7 @@
   const BitMap live_at_edge = to_block->live_in();
 
   // visit all registers where the live_at_edge bit is set
-  for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) {
+  for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
     assert(r < num_regs, "live information set for non-existing interval");
     assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
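
The (int) casts in the two loops above, and in the matching loops later in this patch, are evidently needed because BitMap's size() and get_next_one_offset() return unsigned, word-sized values: on LP64 builds those no longer convert to int without narrowing warnings, while the register numbers the allocator traffics in are plain ints, so the patch narrows once at each use. A minimal sketch of the same next-set-bit iteration idiom, written against a simplified stand-in bitmap (SimpleBitMap below is hypothetical, not HotSpot's BitMap class):

#include <cstdio>
#include <vector>

// Simplified stand-in for HotSpot's BitMap. get_next_one_offset(l, r)
// returns the index of the first set bit in [l, r), or r if none is set.
struct SimpleBitMap {
  std::vector<bool> bits;
  explicit SimpleBitMap(size_t n) : bits(n, false) {}
  void set(size_t i)  { bits[i] = true; }
  size_t size() const { return bits.size(); }
  size_t get_next_one_offset(size_t l, size_t r) const {
    while (l < r && !bits[l]) l++;
    return l;
  }
};

int main() {
  SimpleBitMap live(64);
  live.set(3); live.set(17); live.set(40);

  // The loop shape used in the patch: narrow the unsigned offsets to int
  // at the boundary, so the loop variable matches the int-typed virtual
  // register numbers used everywhere else in the allocator.
  int size = (int)live.size();
  for (int r = (int)live.get_next_one_offset(0, size);
       r < size;
       r = (int)live.get_next_one_offset(r + 1, size)) {
    printf("live register %d\n", r);
  }
  return 0;
}
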
@@ -1824,7 +1826,7 @@
 
   // visit all registers where the live_in bit is set
   int size = live_set_size();
-  for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) {
+  for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
     resolve_exception_entry(block, r, move_resolver);
   }
 
@@ -1898,7 +1900,7 @@
   // visit all registers where the live_in bit is set
   BlockBegin* block = handler->entry_block();
   int size = live_set_size();
-  for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) {
+  for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
     resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
   }
 
@@ -2032,19 +2034,19 @@
         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
       }
 
-#ifdef SPARC
 #ifdef _LP64
       return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
 #else
+#ifdef SPARC
       return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
-#endif
 #else
       return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
-#endif
+#endif // SPARC
+#endif // LP64
     }
 
     case T_FLOAT: {
-#ifdef IA32
+#ifdef X86
       if (UseSSE >= 1) {
         assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
@@ -2058,7 +2060,7 @@
     }
 
     case T_DOUBLE: {
-#ifdef IA32
+#ifdef X86
      if (UseSSE >= 2) {
         assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
         assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
@@ -2122,7 +2124,7 @@
   LIR_Opr res = operand_for_interval(interval);
 
-#ifdef IA32
+#ifdef X86
   // new semantic for is_last_use: not only set on definite end of interval,
   // but also before hole
   // This may still miss some cases (e.g. for dead values), but it is not necessary that the
@@ -2475,6 +2477,7 @@
     default:
       ShouldNotReachHere();
+      return -1;
   }
 }
 
@@ -2515,7 +2518,7 @@
     scope_values->append(sv);
     return 1;
 
-#ifdef IA32
+#ifdef X86
   } else if (opr->is_single_xmm()) {
     VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
     LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
@@ -2525,7 +2528,7 @@
 #endif
 
   } else if (opr->is_single_fpu()) {
-#ifdef IA32
+#ifdef X86
     // the exact location of fpu stack values is only known
     // during fpu stack allocation, so the stack allocator object
     // must be present
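
The @@ -2032,19 hunk above untangles the nested preprocessor tests so the 64-bit case is decided first on every architecture: in an _LP64 VM a T_LONG occupies a single register, so both halves of the double_cpu operand name the same register, and only the 32-bit paths still differ between SPARC (halves swapped) and x86. A sketch of the same decision with the preprocessor conditions rewritten as ordinary booleans (illustrative only, not HotSpot code):

#include <cstdio>
#include <utility>

// (lo, hi) mirror the two arguments of LIR_OprFact::double_cpu().
static std::pair<int, int> double_cpu_regs(bool lp64, bool sparc,
                                           int assigned_reg, int assigned_regHi) {
  if (lp64) {
    // 64-bit VM: the whole long lives in one register, so the same
    // register is passed for both halves on every architecture,
    // which is why the _LP64 test now comes first.
    return std::make_pair(assigned_reg, assigned_reg);
  }
  if (sparc) {
    // 32-bit SPARC: the two halves are in the opposite order from x86.
    return std::make_pair(assigned_regHi, assigned_reg);
  }
  // 32-bit x86.
  return std::make_pair(assigned_reg, assigned_regHi);
}

int main() {
  std::pair<int, int> p64 = double_cpu_regs(true,  false, 4, 5);
  std::pair<int, int> psp = double_cpu_regs(false, true,  4, 5);
  std::pair<int, int> p86 = double_cpu_regs(false, false, 4, 5);
  printf("lp64 (%d,%d)  sparc32 (%d,%d)  x86-32 (%d,%d)\n",
         p64.first, p64.second, psp.first, psp.second, p86.first, p86.second);
  return 0;
}
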
@@ -2548,12 +2551,23 @@
     ScopeValue* second;
 
     if (opr->is_double_stack()) {
+#ifdef _LP64
+      Location loc1;
+      Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
+      if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
+        bailout("too large frame");
+      }
+      // Does this reverse on x86 vs. sparc?
+      first = new LocationValue(loc1);
+      second = &_int_0_scope_value;
+#else
       Location loc1, loc2;
       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
         bailout("too large frame");
       }
       first = new LocationValue(loc1);
       second = new LocationValue(loc2);
+#endif // _LP64
 
     } else if (opr->is_double_cpu()) {
 #ifdef _LP64
@@ -2573,9 +2587,10 @@
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
-#endif
-
-#ifdef IA32
+#endif //_LP64
+
+
+#ifdef X86
     } else if (opr->is_double_xmm()) {
       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
       VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
@@ -2589,13 +2604,13 @@
     } else if (opr->is_double_fpu()) {
       // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
-      // the double as float registers in the native ordering. On IA32,
+      // the double as float registers in the native ordering. On X86,
       // fpu_regnrLo is a FPU stack slot whose VMReg represents
       // the low-order word of the double and fpu_regnrLo + 1 is the
       // name for the other half. *first and *second must represent the
       // least and most significant words, respectively.
 
-#ifdef IA32
+#ifdef X86
       // the exact location of fpu stack values is only known
       // during fpu stack allocation, so the stack allocator object
       // must be present
@@ -2865,7 +2880,6 @@
     op->verify();
 #endif
 
-#ifndef _LP64
     // remove useless moves
     if (op->code() == lir_move) {
       assert(op->as_Op1() != NULL, "move must be LIR_Op1");
@@ -2879,7 +2893,6 @@
         has_dead = true;
       }
     }
-#endif
   }
 
   if (has_dead) {
@@ -3192,7 +3205,7 @@
     BitMap live_at_edge = block->live_in();
 
     // visit all registers where the live_at_edge bit is set
-    for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) {
+    for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
       TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));
 
       Value value = gen()->instruction_for_vreg(r);
@@ -3438,7 +3451,7 @@
       state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
     }
 
-#ifdef IA32
+#ifdef X86
     for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
       state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
     }
@@ -4357,7 +4370,7 @@
     opr = LIR_OprFact::single_cpu(assigned_reg());
   } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
     opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
-#ifdef IA32
+#ifdef X86
   } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
     opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
 #endif
@@ -5435,7 +5448,7 @@
 }
 
 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
-#ifdef IA32
+#ifdef X86
   // fast calculation of intervals that can never get a register because
   // the next instruction is a call that blocks all registers
   // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
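
A note on the return -1; lines this patch adds after ShouldNotReachHere() in reg_num(), reg_numHi(), and the switch at @@ -2475: a plausible motivation is that unless the macro expands to a call the compiler knows cannot return, control is assumed to continue past it, so a value-returning function still needs a return statement on that path to avoid missing-return warnings. A small sketch of the pattern, with an assumed, simplified stand-in for the real macro:

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for HotSpot's ShouldNotReachHere(): it reports and
// aborts, but the compiler cannot see that report_unreachable() never
// returns, because the function is not declared noreturn.
static void report_unreachable(const char* file, int line) {
  fprintf(stderr, "should not reach here: %s:%d\n", file, line);
  abort();
}
#define ShouldNotReachHere() report_unreachable(__FILE__, __LINE__)

static int reg_num_hi(int kind) {
  switch (kind) {
    case 0: return -1;   // single-word operand: no high half
    case 1: return 7;    // double-word operand: hypothetical hi register
    default:
      ShouldNotReachHere();
      return -1;         // keeps every path well-defined and silences
                         // -Wreturn-type, mirroring the lines added above
  }
}

int main() {
  printf("%d %d\n", reg_num_hi(0), reg_num_hi(1));
  return 0;
}
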