hotspot/src/share/vm/c1/c1_LinearScan.cpp
changeset 1066 717c3345024f
parent 1 489c9b5090e2
child 1217 5eb97f366a6a
equal deleted inserted replaced
1065:dbeb68f8a0ee 1066:717c3345024f
    78  , _has_info(0)
    78  , _has_info(0)
    79  , _has_call(0)
    79  , _has_call(0)
    80  , _scope_value_cache(0) // initialized later with correct length
    80  , _scope_value_cache(0) // initialized later with correct length
    81  , _interval_in_loop(0, 0) // initialized later with correct length
    81  , _interval_in_loop(0, 0) // initialized later with correct length
    82  , _cached_blocks(*ir->linear_scan_order())
    82  , _cached_blocks(*ir->linear_scan_order())
    83 #ifdef IA32
    83 #ifdef X86
    84  , _fpu_stack_allocator(NULL)
    84  , _fpu_stack_allocator(NULL)
    85 #endif
    85 #endif
    86 {
    86 {
    87   // note: to use more than one instance of LinearScan at a time this function call has to
    87   // note: to use more than one instance of LinearScan at a time this function call has to
    88   //       be moved somewhere outside of this constructor:
    88   //       be moved somewhere outside of this constructor:
   114     return opr->vreg_number();
   114     return opr->vreg_number();
   115   } else if (opr->is_single_cpu()) {
   115   } else if (opr->is_single_cpu()) {
   116     return opr->cpu_regnr();
   116     return opr->cpu_regnr();
   117   } else if (opr->is_double_cpu()) {
   117   } else if (opr->is_double_cpu()) {
   118     return opr->cpu_regnrLo();
   118     return opr->cpu_regnrLo();
   119 #ifdef IA32
   119 #ifdef X86
   120   } else if (opr->is_single_xmm()) {
   120   } else if (opr->is_single_xmm()) {
   121     return opr->fpu_regnr() + pd_first_xmm_reg;
   121     return opr->fpu_regnr() + pd_first_xmm_reg;
   122   } else if (opr->is_double_xmm()) {
   122   } else if (opr->is_double_xmm()) {
   123     return opr->fpu_regnrLo() + pd_first_xmm_reg;
   123     return opr->fpu_regnrLo() + pd_first_xmm_reg;
   124 #endif
   124 #endif
   126     return opr->fpu_regnr() + pd_first_fpu_reg;
   126     return opr->fpu_regnr() + pd_first_fpu_reg;
   127   } else if (opr->is_double_fpu()) {
   127   } else if (opr->is_double_fpu()) {
   128     return opr->fpu_regnrLo() + pd_first_fpu_reg;
   128     return opr->fpu_regnrLo() + pd_first_fpu_reg;
   129   } else {
   129   } else {
   130     ShouldNotReachHere();
   130     ShouldNotReachHere();
       
   131     return -1;
   131   }
   132   }
   132 }
   133 }
   133 
   134 
   134 int LinearScan::reg_numHi(LIR_Opr opr) {
   135 int LinearScan::reg_numHi(LIR_Opr opr) {
   135   assert(opr->is_register(), "should not call this otherwise");
   136   assert(opr->is_register(), "should not call this otherwise");
   138     return -1;
   139     return -1;
   139   } else if (opr->is_single_cpu()) {
   140   } else if (opr->is_single_cpu()) {
   140     return -1;
   141     return -1;
   141   } else if (opr->is_double_cpu()) {
   142   } else if (opr->is_double_cpu()) {
   142     return opr->cpu_regnrHi();
   143     return opr->cpu_regnrHi();
   143 #ifdef IA32
   144 #ifdef X86
   144   } else if (opr->is_single_xmm()) {
   145   } else if (opr->is_single_xmm()) {
   145     return -1;
   146     return -1;
   146   } else if (opr->is_double_xmm()) {
   147   } else if (opr->is_double_xmm()) {
   147     return -1;
   148     return -1;
   148 #endif
   149 #endif
   150     return -1;
   151     return -1;
   151   } else if (opr->is_double_fpu()) {
   152   } else if (opr->is_double_fpu()) {
   152     return opr->fpu_regnrHi() + pd_first_fpu_reg;
   153     return opr->fpu_regnrHi() + pd_first_fpu_reg;
   153   } else {
   154   } else {
   154     ShouldNotReachHere();
   155     ShouldNotReachHere();
       
   156     return -1;
   155   }
   157   }
   156 }
   158 }
   157 
   159 
   158 
   160 
   159 // ********** functions for classification of intervals
   161 // ********** functions for classification of intervals
  1061       return shouldHaveRegister;
  1063       return shouldHaveRegister;
  1062     }
  1064     }
  1063   }
  1065   }
  1064 
  1066 
  1065 
  1067 
  1066 #ifdef IA32
  1068 #ifdef X86
  1067   if (op->code() == lir_cmove) {
  1069   if (op->code() == lir_cmove) {
  1068     // conditional moves can handle stack operands
  1070     // conditional moves can handle stack operands
  1069     assert(op->result_opr()->is_register(), "result must always be in a register");
  1071     assert(op->result_opr()->is_register(), "result must always be in a register");
  1070     return shouldHaveRegister;
  1072     return shouldHaveRegister;
  1071   }
  1073   }
  1126           return shouldHaveRegister;
  1128           return shouldHaveRegister;
  1127         }
  1129         }
  1128       }
  1130       }
  1129     }
  1131     }
  1130   }
  1132   }
  1131 #endif // IA32
  1133 #endif // X86
  1132 
  1134 
  1133   // all other operands require a register
  1135   // all other operands require a register
  1134   return mustHaveRegister;
  1136   return mustHaveRegister;
  1135 }
  1137 }
  1136 
  1138 
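
The return values above come from an ascending use-kind ranking (noUse < loopEndMarker < shouldHaveRegister < mustHaveRegister): shouldHaveRegister lets the allocator leave the operand on the stack under register pressure, while mustHaveRegister forces a reload. A hedged sketch of the decision for input operands, with the opcode test reduced to the cmove case shown above:

    enum IntervalUseKind { noUse = 0, loopEndMarker = 1,
                           shouldHaveRegister = 2, mustHaveRegister = 3 };

    enum OpCode { op_move, op_cmove, op_other };   // simplified stand-ins

    IntervalUseKind use_kind_of_input(OpCode code, bool target_is_x86) {
      // on x86 a conditional move can read its input from a stack slot,
      // so a register is merely preferred, not required
      if (target_is_x86 && code == op_cmove) return shouldHaveRegister;
      return mustHaveRegister;                     // all other operands
    }
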
  1259 
  1261 
  1260   // temp ranges for fpu registers are only created when the method has
  1262   // temp ranges for fpu registers are only created when the method has
  1261   // virtual fpu operands. Otherwise no allocation for fpu registers is
  1263   // virtual fpu operands. Otherwise no allocation for fpu registers is
  1262   // performed and so the temp ranges would be useless
  1264   // performed and so the temp ranges would be useless
  1263   if (has_fpu_registers()) {
  1265   if (has_fpu_registers()) {
  1264 #ifdef IA32
  1266 #ifdef X86
  1265     if (UseSSE < 2) {
  1267     if (UseSSE < 2) {
  1266 #endif
  1268 #endif
  1267       for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
  1269       for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
  1268         LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
  1270         LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
  1269         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
  1271         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
  1270         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
  1272         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
  1271         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
  1273         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
  1272       }
  1274       }
  1273 #ifdef IA32
  1275 #ifdef X86
  1274     }
  1276     }
  1275     if (UseSSE > 0) {
  1277     if (UseSSE > 0) {
  1276       for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
  1278       for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
  1277         LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(i);
  1279         LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(i);
  1278         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
  1280         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
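
The UseSSE guards above decide which register banks receive these blocking temp ranges at a call: below SSE2 the x87 stack registers still carry float work and must be blocked, and from SSE1 on the xmm bank is blocked as well. A small sketch of that gating, assuming a simplified two-bank model:

    #include <string>
    #include <vector>

    std::vector<std::string> blocked_fpu_banks(int use_sse) {
      std::vector<std::string> banks;
      if (use_sse < 2) banks.push_back("x87");   // x87 still used below SSE2
      if (use_sse > 0) banks.push_back("xmm");   // xmm in play from SSE1 on
      return banks;                              // UseSSE == 1 blocks both banks
    }
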
  1297     assert(block_from == instructions->at(0)->id(), "must be");
  1299     assert(block_from == instructions->at(0)->id(), "must be");
  1298     assert(block_to   == instructions->at(instructions->length() - 1)->id(), "must be");
  1300     assert(block_to   == instructions->at(instructions->length() - 1)->id(), "must be");
  1299 
  1301 
  1300     // Update intervals for registers live at the end of this block;
  1302     // Update intervals for registers live at the end of this block;
  1301     BitMap live = block->live_out();
  1303     BitMap live = block->live_out();
  1302     int size = live.size();
  1304     int size = (int)live.size();
  1303     for (int number = live.get_next_one_offset(0, size); number < size; number = live.get_next_one_offset(number + 1, size)) {
  1305     for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
  1304       assert(live.at(number), "should not stop here otherwise");
  1306       assert(live.at(number), "should not stop here otherwise");
  1305       assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
  1307       assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
  1306       TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));
  1308       TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));
  1307 
  1309 
  1308       add_use(number, block_from, block_to + 2, noUse, T_ILLEGAL);
  1310       add_use(number, block_from, block_to + 2, noUse, T_ILLEGAL);
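
The loop above is the allocator's standard set-bit walk: get_next_one_offset returns the next set bit at or after its first argument and returns size when none is left, so "< size" is the end test. A standalone illustration with std::bitset standing in for HotSpot's BitMap:

    #include <bitset>
    #include <cstdio>

    const int kSize = 64;   // stands in for live_set_size()

    int next_one(const std::bitset<kSize>& bm, int from) {
      for (int i = from; i < kSize; i++) if (bm.test(i)) return i;
      return kSize;         // the size acts as the end sentinel
    }

    void walk_live_out(const std::bitset<kSize>& live,
                       int block_from, int block_to) {
      for (int n = next_one(live, 0); n < kSize; n = next_one(live, n + 1)) {
        // every register live out of the block gets a range spanning the
        // whole block, extended to block_to + 2 so it reaches the boundary
        std::printf("add_use(%d, %d, %d)\n", n, block_from, block_to + 2);
      }
    }
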
  1652   const int num_regs = num_virtual_regs();
  1654   const int num_regs = num_virtual_regs();
  1653   const int size = live_set_size();
  1655   const int size = live_set_size();
  1654   const BitMap live_at_edge = to_block->live_in();
  1656   const BitMap live_at_edge = to_block->live_in();
  1655 
  1657 
  1656   // visit all registers where the live_at_edge bit is set
  1658   // visit all registers where the live_at_edge bit is set
  1657   for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) {
  1659   for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
  1658     assert(r < num_regs, "live information set for not existing interval");
  1660     assert(r < num_regs, "live information set for not existing interval");
  1659     assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
  1661     assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
  1660 
  1662 
  1661     Interval* from_interval = interval_at_block_end(from_block, r);
  1663     Interval* from_interval = interval_at_block_end(from_block, r);
  1662     Interval* to_interval = interval_at_block_begin(to_block, r);
  1664     Interval* to_interval = interval_at_block_begin(to_block, r);
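
The two lookups above feed the move resolver (elided in this hunk): when an interval was split, the child holding the value at the end of from_block can differ from the child expected at the start of to_block, and only then is a resolving move needed on the edge. A minimal sketch of that check:

    struct Interval { int assigned_reg; };

    // a move is required exactly when the split children at the two block
    // boundaries are different intervals
    bool needs_resolution_move(const Interval* from_interval,
                               const Interval* to_interval) {
      return from_interval != to_interval;
    }
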
  1822   assert(block->is_set(BlockBegin::exception_entry_flag), "should not call otherwise");
  1824   assert(block->is_set(BlockBegin::exception_entry_flag), "should not call otherwise");
  1823   DEBUG_ONLY(move_resolver.check_empty());
  1825   DEBUG_ONLY(move_resolver.check_empty());
  1824 
  1826 
  1825   // visit all registers where the live_in bit is set
  1827   // visit all registers where the live_in bit is set
  1826   int size = live_set_size();
  1828   int size = live_set_size();
  1827   for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) {
  1829   for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
  1828     resolve_exception_entry(block, r, move_resolver);
  1830     resolve_exception_entry(block, r, move_resolver);
  1829   }
  1831   }
  1830 
  1832 
  1831   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
  1833   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
  1832   for_each_phi_fun(block, phi,
  1834   for_each_phi_fun(block, phi,
  1896   assert(handler->entry_code() == NULL, "code already present");
  1898   assert(handler->entry_code() == NULL, "code already present");
  1897 
  1899 
  1898   // visit all registers where the live_in bit is set
  1900   // visit all registers where the live_in bit is set
  1899   BlockBegin* block = handler->entry_block();
  1901   BlockBegin* block = handler->entry_block();
  1900   int size = live_set_size();
  1902   int size = live_set_size();
  1901   for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) {
  1903   for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
  1902     resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
  1904     resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
  1903   }
  1905   }
  1904 
  1906 
  1905   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
  1907   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
  1906   for_each_phi_fun(block, phi,
  1908   for_each_phi_fun(block, phi,
  2030         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must match");
  2032         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must match");
  2031         if (requires_adjacent_regs(T_LONG)) {
  2033         if (requires_adjacent_regs(T_LONG)) {
  2032           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
  2034           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
  2033         }
  2035         }
  2034 
  2036 
  2035 #ifdef SPARC
       
  2036 #ifdef _LP64
  2037 #ifdef _LP64
  2037         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
  2038         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
  2038 #else
  2039 #else
       
  2040 #ifdef SPARC
  2039         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
  2041         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
  2040 #endif
       
  2041 #else
  2042 #else
  2042         return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
  2043         return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
  2043 #endif
  2044 #endif // SPARC
       
  2045 #endif // LP64
  2044       }
  2046       }
  2045 
  2047 
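
The reordered #ifdef nesting above now lets _LP64 take priority on every platform: on 64-bit targets both halves of the T_LONG operand name the same register, and only 32-bit SPARC swaps the (hi, lo) argument order relative to 32-bit x86. A sketch of the three outcomes (the platform tags are illustrative, not HotSpot macros):

    struct DoubleCpu { int lo, hi; };   // mirrors LIR_OprFact::double_cpu args

    enum Target { kLP64, kSparc32, kX86_32 };

    DoubleCpu double_cpu_for(Target t, int assigned_reg, int assigned_regHi) {
      switch (t) {
        case kLP64:    return { assigned_reg,   assigned_reg   }; // one 64-bit reg
        case kSparc32: return { assigned_regHi, assigned_reg   }; // swapped halves
        default:       return { assigned_reg,   assigned_regHi }; // natural order
      }
    }
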
  2046       case T_FLOAT: {
  2048       case T_FLOAT: {
  2047 #ifdef IA32
  2049 #ifdef X86
  2048         if (UseSSE >= 1) {
  2050         if (UseSSE >= 1) {
  2049           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
  2051           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
  2050           assert(interval->assigned_regHi() == any_reg, "must not have hi register");
  2052           assert(interval->assigned_regHi() == any_reg, "must not have hi register");
  2051           return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
  2053           return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
  2052         }
  2054         }
  2056         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
  2058         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
  2057         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
  2059         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
  2058       }
  2060       }
  2059 
  2061 
  2060       case T_DOUBLE: {
  2062       case T_DOUBLE: {
  2061 #ifdef IA32
  2063 #ifdef X86
  2062         if (UseSSE >= 2) {
  2064         if (UseSSE >= 2) {
  2063           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
  2065           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
  2064           assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
  2066           assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
  2065           return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
  2067           return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
  2066         }
  2068         }
  2120     interval = split_child_at_op_id(interval, op_id, mode);
  2122     interval = split_child_at_op_id(interval, op_id, mode);
  2121   }
  2123   }
  2122 
  2124 
  2123   LIR_Opr res = operand_for_interval(interval);
  2125   LIR_Opr res = operand_for_interval(interval);
  2124 
  2126 
  2125 #ifdef IA32
  2127 #ifdef X86
  2126   // new semantics for is_last_use: not only set at the definite end of an interval,
  2128   // new semantics for is_last_use: not only set at the definite end of an interval,
  2127   // but also before a hole
  2129   // but also before a hole
  2128   // This may still miss some cases (e.g. for dead values), but it is not necessary that the
  2130   // This may still miss some cases (e.g. for dead values), but it is not necessary that the
  2129   // last use information is completely correct
  2131   // last use information is completely correct
  2130   // the information is only needed for fpu stack allocation
  2132   // the information is only needed for fpu stack allocation
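
Treating a use before a lifetime hole as a last use matters only to the fpu stack allocator, which uses the flag to decide when a value can be popped from the x87 stack, so occasional missed cases are tolerable. A hedged sketch of that interpretation over an interval's range list:

    // simplified range list: [from, to) intervals in ascending order
    struct Range { int from, to; const Range* next; };

    bool is_last_use_at(const Range* r, int op_id) {
      for (; r != nullptr; r = r->next) {
        if (op_id >= r->from && op_id < r->to) {
          // a use at the very end of a range is "last" even if another
          // range (a hole followed by further uses) still comes after it
          return op_id == r->to - 1;
        }
      }
      return false;
    }
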
  2473       return 2;
  2475       return 2;
  2474     }
  2476     }
  2475 
  2477 
  2476     default:
  2478     default:
  2477       ShouldNotReachHere();
  2479       ShouldNotReachHere();
       
  2480       return -1;
  2478   }
  2481   }
  2479 }
  2482 }
  2480 
  2483 
  2481 int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
  2484 int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
  2482   if (opr->is_single_stack()) {
  2485   if (opr->is_single_stack()) {
  2513     DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : Location::normal, frame_map()->regname(opr)))));
  2516     DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : Location::normal, frame_map()->regname(opr)))));
  2514 
  2517 
  2515     scope_values->append(sv);
  2518     scope_values->append(sv);
  2516     return 1;
  2519     return 1;
  2517 
  2520 
  2518 #ifdef IA32
  2521 #ifdef X86
  2519   } else if (opr->is_single_xmm()) {
  2522   } else if (opr->is_single_xmm()) {
  2520     VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
  2523     VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
  2521     LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
  2524     LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
  2522 
  2525 
  2523     scope_values->append(sv);
  2526     scope_values->append(sv);
  2524     return 1;
  2527     return 1;
  2525 #endif
  2528 #endif
  2526 
  2529 
  2527   } else if (opr->is_single_fpu()) {
  2530   } else if (opr->is_single_fpu()) {
  2528 #ifdef IA32
  2531 #ifdef X86
  2529     // the exact location of fpu stack values is only known
  2532     // the exact location of fpu stack values is only known
  2530     // during fpu stack allocation, so the stack allocator object
  2533     // during fpu stack allocation, so the stack allocator object
  2531     // must be present
  2534     // must be present
  2532     assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
  2535     assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
  2533     assert(_fpu_stack_allocator != NULL, "must be present");
  2536     assert(_fpu_stack_allocator != NULL, "must be present");
  2546 
  2549 
  2547     ScopeValue* first;
  2550     ScopeValue* first;
  2548     ScopeValue* second;
  2551     ScopeValue* second;
  2549 
  2552 
  2550     if (opr->is_double_stack()) {
  2553     if (opr->is_double_stack()) {
       
  2554 #ifdef _LP64
       
  2555       Location loc1;
       
  2556       Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
       
  2557       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
       
  2558         bailout("too large frame");
       
  2559       }
       
  2560       // Does this reverse on x86 vs. sparc?
       
  2561       first =  new LocationValue(loc1);
       
  2562       second = &_int_0_scope_value;
       
  2563 #else
  2551       Location loc1, loc2;
  2564       Location loc1, loc2;
  2552       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
  2565       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
  2553         bailout("too large frame");
  2566         bailout("too large frame");
  2554       }
  2567       }
  2555       first =  new LocationValue(loc1);
  2568       first =  new LocationValue(loc1);
  2556       second = new LocationValue(loc2);
  2569       second = new LocationValue(loc2);
       
  2570 #endif // _LP64
  2557 
  2571 
  2558     } else if (opr->is_double_cpu()) {
  2572     } else if (opr->is_double_cpu()) {
  2559 #ifdef _LP64
  2573 #ifdef _LP64
  2560       VMReg rname_first = opr->as_register_lo()->as_VMReg();
  2574       VMReg rname_first = opr->as_register_lo()->as_VMReg();
  2561       first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
  2575       first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
  2571         rname_second = tmp;
  2585         rname_second = tmp;
  2572       }
  2586       }
  2573 
  2587 
  2574       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
  2588       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
  2575       second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
  2589       second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
  2576 #endif
  2590 #endif //_LP64
  2577 
  2591 
  2578 #ifdef IA32
  2592 
       
  2593 #ifdef X86
  2579     } else if (opr->is_double_xmm()) {
  2594     } else if (opr->is_double_xmm()) {
  2580       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
  2595       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
  2581       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
  2596       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
  2582       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
  2597       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
  2583       // %%% This is probably a waste but we'll keep things as they were for now
  2598       // %%% This is probably a waste but we'll keep things as they were for now
  2587       }
  2602       }
  2588 #endif
  2603 #endif
  2589 
  2604 
  2590     } else if (opr->is_double_fpu()) {
  2605     } else if (opr->is_double_fpu()) {
  2591       // On SPARC, fpu_regnrLo/fpu_regnrHi represent the two halves of
  2606       // On SPARC, fpu_regnrLo/fpu_regnrHi represent the two halves of
  2592       // the double as float registers in the native ordering. On IA32,
  2607       // the double as float registers in the native ordering. On X86,
  2593       // fpu_regnrLo is a FPU stack slot whose VMReg represents
  2608       // fpu_regnrLo is a FPU stack slot whose VMReg represents
  2594       // the low-order word of the double and fpu_regnrLo + 1 is the
  2609       // the low-order word of the double and fpu_regnrLo + 1 is the
  2595       // name for the other half.  *first and *second must represent the
  2610       // name for the other half.  *first and *second must represent the
  2596       // least and most significant words, respectively.
  2611       // least and most significant words, respectively.
  2597 
  2612 
  2598 #ifdef IA32
  2613 #ifdef X86
  2599       // the exact location of fpu stack values is only known
  2614       // the exact location of fpu stack values is only known
  2600       // during fpu stack allocation, so the stack allocator object
  2615       // during fpu stack allocation, so the stack allocator object
  2601       // must be present
  2616       // must be present
  2602       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
  2617       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
  2603       assert(_fpu_stack_allocator != NULL, "must be present");
  2618       assert(_fpu_stack_allocator != NULL, "must be present");
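
Two conventions from this hunk are worth spelling out: the debug-info pair (first, second) must always be (least significant word, most significant word), and on _LP64 the newly added double-stack path describes the whole value with one Location plus a shared int-0 filler for the second slot. A hedged sketch of both rules (ScopeVal is a simplified stand-in for the ScopeValue classes):

    #include <utility>

    struct ScopeVal { bool is_location; long payload; };
    static ScopeVal int_0_filler = { false, 0 };   // like _int_0_scope_value

    // LP64: one 64-bit location + filler; 32-bit: two 32-bit locations
    std::pair<ScopeVal*, ScopeVal*> double_stack_values(bool lp64,
                                                        long loc1, long loc2) {
      if (lp64) return { new ScopeVal{ true, loc1 }, &int_0_filler };
      return { new ScopeVal{ true, loc1 }, new ScopeVal{ true, loc2 } };
    }

    // register pairs: swap when the register named "lo" holds the high word
    // (e.g. the SPARC fpu naming described in the comment above)
    std::pair<int, int> lsw_msw_order(int rname_lo, int rname_hi, bool swap) {
      return swap ? std::make_pair(rname_hi, rname_lo)
                  : std::make_pair(rname_lo, rname_hi);
    }
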
  2863 #ifdef ASSERT
  2878 #ifdef ASSERT
  2864     // make sure we haven't made the op invalid.
  2879     // make sure we haven't made the op invalid.
  2865     op->verify();
  2880     op->verify();
  2866 #endif
  2881 #endif
  2867 
  2882 
  2868 #ifndef _LP64
       
  2869     // remove useless moves
  2883     // remove useless moves
  2870     if (op->code() == lir_move) {
  2884     if (op->code() == lir_move) {
  2871       assert(op->as_Op1() != NULL, "move must be LIR_Op1");
  2885       assert(op->as_Op1() != NULL, "move must be LIR_Op1");
  2872       LIR_Op1* move = (LIR_Op1*)op;
  2886       LIR_Op1* move = (LIR_Op1*)op;
  2873       LIR_Opr src = move->in_opr();
  2887       LIR_Opr src = move->in_opr();
  2877           src->is_same_register(dst)) {
  2891           src->is_same_register(dst)) {
  2878         instructions->at_put(j, NULL);
  2892         instructions->at_put(j, NULL);
  2879         has_dead = true;
  2893         has_dead = true;
  2880       }
  2894       }
  2881     }
  2895     }
  2882 #endif
       
  2883   }
  2896   }
  2884 
  2897 
  2885   if (has_dead) {
  2898   if (has_dead) {
  2886     // iterate all instructions of the block and remove all null-values.
  2899     // iterate all instructions of the block and remove all null-values.
  2887     int insert_point = 0;
  2900     int insert_point = 0;
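
With the #ifndef _LP64 guard dropped above, useless-move elimination now runs on 64-bit builds as well: after register assignment, a move whose source and destination resolved to the same location is nulled in place, and the trailing pass (starting at insert_point above) compacts the survivors. A standalone sketch of that null-then-compact idiom (ownership of the nulled ops is elided):

    #include <vector>

    struct Move { int src_reg, dst_reg; };

    void remove_useless_moves(std::vector<Move*>& ops) {
      bool has_dead = false;
      for (Move*& op : ops) {
        if (op != nullptr && op->src_reg == op->dst_reg) {
          op = nullptr;                  // null the no-op move in place
          has_dead = true;
        }
      }
      if (has_dead) {                    // compact survivors over the nulls
        int insert_point = 0;
        for (Move* op : ops) if (op != nullptr) ops[insert_point++] = op;
        ops.resize(insert_point);
      }
    }
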
  3190   for (int i = 0; i < num_blocks; i++) {
  3203   for (int i = 0; i < num_blocks; i++) {
  3191     BlockBegin* block = block_at(i);
  3204     BlockBegin* block = block_at(i);
  3192     BitMap live_at_edge = block->live_in();
  3205     BitMap live_at_edge = block->live_in();
  3193 
  3206 
  3194     // visit all registers where the live_at_edge bit is set
  3207     // visit all registers where the live_at_edge bit is set
  3195     for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) {
  3208     for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
  3196       TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));
  3209       TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));
  3197 
  3210 
  3198       Value value = gen()->instruction_for_vreg(r);
  3211       Value value = gen()->instruction_for_vreg(r);
  3199 
  3212 
  3200       assert(value != NULL, "all intervals live across block boundaries must have Value");
  3213       assert(value != NULL, "all intervals live across block boundaries must have Value");
  3436       }
  3449       }
  3437       for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
  3450       for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
  3438         state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
  3451         state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
  3439       }
  3452       }
  3440 
  3453 
  3441 #ifdef IA32
  3454 #ifdef X86
  3442       for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
  3455       for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
  3443         state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
  3456         state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
  3444       }
  3457       }
  3445 #endif
  3458 #endif
  3446     }
  3459     }
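
This verification pass models a call by clearing its snapshot of every caller-saved register, so a later read of one of them trips an assert. A tiny sketch of the state_put effect, assuming a plain vector as the input state:

    #include <vector>

    // forget the value in every caller-saved register across a call
    void kill_caller_saved(std::vector<const void*>& input_state,
                           const std::vector<int>& caller_saved_regs) {
      for (int reg : caller_saved_regs) input_state[reg] = nullptr;
    }
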
  4355     // need a temporary operand for fixed intervals because type() cannot be called
  4368     // need a temporary operand for fixed intervals because type() cannot be called
  4356     if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
  4369     if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
  4357       opr = LIR_OprFact::single_cpu(assigned_reg());
  4370       opr = LIR_OprFact::single_cpu(assigned_reg());
  4358     } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
  4371     } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
  4359       opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
  4372       opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
  4360 #ifdef IA32
  4373 #ifdef X86
  4361     } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
  4374     } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
  4362       opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
  4375       opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
  4363 #endif
  4376 #endif
  4364     } else {
  4377     } else {
  4365       ShouldNotReachHere();
  4378       ShouldNotReachHere();
  5433     split_and_spill_intersecting_intervals(reg, regHi);
  5446     split_and_spill_intersecting_intervals(reg, regHi);
  5434   }
  5447   }
  5435 }
  5448 }
  5436 
  5449 
  5437 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
  5450 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
  5438 #ifdef IA32
  5451 #ifdef X86
  5439   // fast calculation of intervals that can never get a register because
  5452   // fast calculation of intervals that can never get a register because
  5440   // the next instruction is a call that blocks all registers
  5453   // the next instruction is a call that blocks all registers
  5441   // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
  5454   // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
  5442 
  5455 
  5443   // check if this interval is the result of a split operation
  5456   // check if this interval is the result of a split operation
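
The fast path sketched by this comment spills such an interval immediately instead of running the full allocation attempt; it is sound only when no callee-saved registers exist, because a call then blocks every register at once. A heavily simplified sketch of the idea (the real position arithmetic on split intervals is elided):

    // assumed helper semantics: next_blocking_call_id is the op id of the
    // next call that blocks all registers, or -1 if there is none
    bool no_allocation_possible_sketch(int interval_from,
                                       int next_blocking_call_id,
                                       bool has_callee_saved_regs) {
      if (has_callee_saved_regs) return false;   // some register survives
      // if the interval begins immediately before such a call, no register
      // can hold it across its first position, so spill it right away
      return next_blocking_call_id >= 0 &&
             interval_from + 1 == next_blocking_call_id;
    }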