hotspot/src/share/vm/opto/chaitin.cpp
changeset 17013 22a05c7f3314
parent 14623 70c4c1be0a14
child 18099 45973b036c3e
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Mon Apr 15 16:20:05 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Tue Apr 16 10:08:41 2013 +0200
@@ -145,6 +145,72 @@
 
 #define NUMBUCKS 3
 
+// Straight out of Tarjan's union-find algorithm
+uint LiveRangeMap::find_compress(uint lrg) {
+  uint cur = lrg;
+  uint next = _uf_map[cur];
+  while (next != cur) { // Scan chain of equivalences
+  assert(next < cur, "always union smaller");
+    cur = next; // until find a fixed-point
+    next = _uf_map[cur];
+  }
+
+  // Core of union-find algorithm: update chain of
+  // equivalences to be equal to the root.
+  while (lrg != next) {
+    uint tmp = _uf_map[lrg];
+    _uf_map.map(lrg, next);
+    lrg = tmp;
+  }
+  return lrg;
+}
+
+// Reset the Union-Find map to identity
+void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
+  _max_lrg_id = max_lrg_id;
+  // Force the Union-Find mapping to be at least this large
+  _uf_map.extend(_max_lrg_id, 0);
+  // Initialize it to be the ID mapping.
+  for (uint i = 0; i < _max_lrg_id; ++i) {
+    _uf_map.map(i, i);
+  }
+}
+
+// Make all Nodes map directly to their final live range; no need for
+// the Union-Find mapping after this call.
+void LiveRangeMap::compress_uf_map_for_nodes() {
+  // For all Nodes, compress mapping
+  uint unique = _names.Size();
+  for (uint i = 0; i < unique; ++i) {
+    uint lrg = _names[i];
+    uint compressed_lrg = find(lrg);
+    if (lrg != compressed_lrg) {
+      _names.map(i, compressed_lrg);
+    }
+  }
+}
+
+// Like Find above, but with no path compression, so it has bad asymptotic behavior
+uint LiveRangeMap::find_const(uint lrg) const {
+  if (!lrg) {
+    return lrg; // Ignore the zero LRG
+  }
+
+  // Off the end?  This happens during debugging dumps when you have
+  // brand-new live ranges but have not told the allocator about them yet.
+  if (lrg >= _max_lrg_id) {
+    return lrg;
+  }
+
+  uint next = _uf_map[lrg];
+  while (next != lrg) { // Scan chain of equivalences
+    assert(next < lrg, "always union smaller");
+    lrg = next; // until find a fixed-point
+    next = _uf_map[lrg];
+  }
+  return next;
+}
+
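find_compress above is the textbook two-pass union-find lookup: first chase the chain of equivalences down to its self-cycled root, then rewrite every entry on the chain to point straight at that root so the next lookup is O(1). A minimal standalone sketch of the same idea, in plain C++ over std::vector rather than the allocator's growable arrays (the UnionFind/parent names are illustrative only, not HotSpot code):

#include <vector>
#include <cassert>

struct UnionFind {
  std::vector<unsigned> parent;               // parent[i] == i marks a root

  explicit UnionFind(unsigned n) : parent(n) {
    for (unsigned i = 0; i < n; ++i) {
      parent[i] = i;                          // identity mapping, like reset_uf_map
    }
  }

  // Two-pass find with path compression.
  unsigned find(unsigned x) {
    unsigned root = x;
    while (parent[root] != root) {            // scan the chain of equivalences
      assert(parent[root] < root);            // chains always run toward smaller ids
      root = parent[root];
    }
    while (x != root) {                       // second pass: compress the path
      unsigned next = parent[x];
      parent[x] = root;
      x = next;
    }
    return root;
  }

  // Union two roots; as in PhaseChaitin::Union added later in this change,
  // the larger id is pointed at the smaller one, preserving the invariant
  // checked in find().
  void unite(unsigned src, unsigned dst) {
    assert(src < dst);
    parent[dst] = src;
  }
};

find_const performs only the first pass, which is why its comment warns about asymptotic behavior: without compression a long chain stays long for every later lookup.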
 //------------------------------Chaitin----------------------------------------
 PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
   : PhaseRegAlloc(unique, cfg, matcher,
@@ -153,13 +219,13 @@
 #else
        NULL
 #endif
-       ),
-    _names(unique), _uf_map(unique),
-    _maxlrg(0), _live(0),
-    _spilled_once(Thread::current()->resource_area()),
-    _spilled_twice(Thread::current()->resource_area()),
-    _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0),
-    _oldphi(unique)
+       )
+  , _lrg_map(unique)
+  , _live(0)
+  , _spilled_once(Thread::current()->resource_area())
+  , _spilled_twice(Thread::current()->resource_area())
+  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
+  , _oldphi(unique)
 #ifndef PRODUCT
   , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
 #endif
@@ -168,7 +234,6 @@
 
   _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);
 
-  uint i,j;
   // Build a list of basic blocks, sorted by frequency
   _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
   // Experiment with sorting strategies to speed compilation
@@ -176,30 +241,30 @@
   Block **buckets[NUMBUCKS];             // Array of buckets
   uint    buckcnt[NUMBUCKS];             // Array of bucket counters
   double  buckval[NUMBUCKS];             // Array of bucket value cutoffs
-  for( i = 0; i < NUMBUCKS; i++ ) {
-    buckets[i] = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
+  for (uint i = 0; i < NUMBUCKS; i++) {
+    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks);
     buckcnt[i] = 0;
     // Bump by three orders of magnitude each time
     cutoff *= 0.001;
     buckval[i] = cutoff;
-    for( j = 0; j < _cfg._num_blocks; j++ ) {
+    for (uint j = 0; j < _cfg._num_blocks; j++) {
       buckets[i][j] = NULL;
     }
   }
   // Sort blocks into buckets
-  for( i = 0; i < _cfg._num_blocks; i++ ) {
-    for( j = 0; j < NUMBUCKS; j++ ) {
-      if( (j == NUMBUCKS-1) || (_cfg._blocks[i]->_freq > buckval[j]) ) {
+  for (uint i = 0; i < _cfg._num_blocks; i++) {
+    for (uint j = 0; j < NUMBUCKS; j++) {
+      if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) {
         // Assign block to end of list for appropriate bucket
         buckets[j][buckcnt[j]++] = _cfg._blocks[i];
-        break;                      // kick out of inner loop
+        break; // kick out of inner loop
       }
     }
   }
   // Dump buckets into final block array
   uint blkcnt = 0;
-  for( i = 0; i < NUMBUCKS; i++ ) {
-    for( j = 0; j < buckcnt[i]; j++ ) {
+  for (uint i = 0; i < NUMBUCKS; i++) {
+    for (uint j = 0; j < buckcnt[i]; j++) {
       _blks[blkcnt++] = buckets[i][j];
     }
   }
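The loop above is a rough three-band bucket sort rather than a full sort by frequency: each block lands in the first band whose cutoff its frequency exceeds (the last band is a catch-all), and the bands are then concatenated hottest-first. A self-contained sketch of the same strategy; the starting cutoff below is a placeholder, since the real initial value is set outside the hunk shown:

#include <cstddef>
#include <vector>

std::vector<std::size_t> sort_by_frequency_band(const std::vector<double>& freq) {
  const int NUMBUCKS = 3;
  std::vector<std::size_t> buckets[NUMBUCKS]; // one list of block indices per band
  double buckval[NUMBUCKS];                   // cutoff for each band
  double cutoff = 1.0;                        // placeholder starting cutoff
  for (int i = 0; i < NUMBUCKS; i++) {
    cutoff *= 0.001;                          // each band is three orders of magnitude colder
    buckval[i] = cutoff;
  }
  for (std::size_t b = 0; b < freq.size(); b++) {
    for (int i = 0; i < NUMBUCKS; i++) {
      if (i == NUMBUCKS - 1 || freq[b] > buckval[i]) {
        buckets[i].push_back(b);              // first band whose cutoff is exceeded
        break;
      }
    }
  }
  std::vector<std::size_t> order;             // hottest band first
  for (int i = 0; i < NUMBUCKS; i++) {
    order.insert(order.end(), buckets[i].begin(), buckets[i].end());
  }
  return order;
}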
@@ -207,6 +272,77 @@
   assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
 }
 
+//------------------------------Union------------------------------------------
+// union 2 sets together.
+void PhaseChaitin::Union(const Node *src_n, const Node *dst_n) {
+  uint src = _lrg_map.find(src_n);
+  uint dst = _lrg_map.find(dst_n);
+  assert(src, "");
+  assert(dst, "");
+  assert(src < _lrg_map.max_lrg_id(), "oob");
+  assert(dst < _lrg_map.max_lrg_id(), "oob");
+  assert(src < dst, "always union smaller");
+  _lrg_map.uf_map(dst, src);
+}
+
+//------------------------------new_lrg----------------------------------------
+void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
+  // Make the Node->LRG mapping
+  _lrg_map.extend(x->_idx, lrg);
+  // Make the Union-Find mapping an identity function
+  _lrg_map.uf_extend(lrg, lrg);
+}
+
+
+bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
+  Block *bcon = _cfg._bbs[con->_idx];
+  uint cindex = bcon->find_node(con);
+  Node *con_next = bcon->_nodes[cindex+1];
+  if (con_next->in(0) != con || !con_next->is_MachProj()) {
+    return false;               // No MachProj's follow
+  }
+
+  // Copy kills after the cloned constant
+  Node *kills = con_next->clone();
+  kills->set_req(0, copy);
+  b->_nodes.insert(idx, kills);
+  _cfg._bbs.map(kills->_idx, b);
+  new_lrg(kills, max_lrg_id);
+  return true;
+}
+
+//------------------------------compact----------------------------------------
+// Renumber the live ranges to compact them.  Makes the IFG smaller.
+void PhaseChaitin::compact() {
+  // Currently the _uf_map contains a series of short chains which are headed
+  // by a self-cycle.  All the chains run from big numbers to little numbers.
+  // The Find() call chases the chains & shortens them for the next Find call.
+  // We are going to change this structure slightly.  Numbers above a moving
+  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
+  // compacted live range with no further chaining.  There are no chains or
+  // cycles below 'i', so the Find call no longer works.
+  uint j = 1;
+  uint i;
+  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
+    uint lr = _lrg_map.uf_live_range_id(i);
+    // Ignore unallocated live ranges
+    if (!lr) {
+      continue;
+    }
+    assert(lr <= i, "");
+  _lrg_map.uf_map(i, (lr == i) ? j++ : _lrg_map.uf_live_range_id(lr));
+  }
+  // Now change the Node->LR mapping to reflect the compacted names
+  uint unique = _lrg_map.size();
+  for (i = 0; i < unique; i++) {
+    uint lrg_id = _lrg_map.live_range_id(i);
+    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
+  }
+
+  // Reset the Union-Find mapping
+  _lrg_map.reset_uf_map(j);
+}
+
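compact() relies on the invariant that chains always point from larger ids to smaller ones: by the time index i is visited, every smaller index already holds its final compacted id, so a single forward pass both hands out fresh consecutive ids to roots and lets chained entries copy the id of the root they point at. A minimal sketch of that renumbering over plain vectors (illustrative names, not HotSpot code):

#include <cassert>
#include <cstddef>
#include <vector>

// Returns the new maximum id, i.e. the value compact() passes to reset_uf_map(j).
unsigned compact_ids(std::vector<unsigned>& uf, std::vector<unsigned>& names) {
  unsigned next_id = 1;                       // id 0 stays "unallocated"
  for (unsigned i = 1; i < uf.size(); i++) {
    unsigned lr = uf[i];
    if (lr == 0) {
      continue;                               // ignore unallocated live ranges
    }
    assert(lr <= i);
    uf[i] = (lr == i) ? next_id++             // a root: hand out the next id
                      : uf[lr];               // a chained entry: reuse its root's new id
  }
  for (std::size_t n = 0; n < names.size(); n++) {
    names[n] = uf[names[n]];                  // rewrite the node -> live range mapping
  }
  return next_id;
}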
 void PhaseChaitin::Register_Allocate() {
 
   // Above the OLD FP (and in registers) are the incoming arguments.  Stack
@@ -231,14 +367,12 @@
   // all copy-related live ranges low and then using the max copy-related
   // live range as a cut-off for LIVE and the IFG.  In other words, I can
   // build a subset of LIVE and IFG just for copies.
-  PhaseLive live(_cfg,_names,&live_arena);
+  PhaseLive live(_cfg, _lrg_map.names(), &live_arena);
 
   // Need IFG for coalescing and coloring
-  PhaseIFG ifg( &live_arena );
+  PhaseIFG ifg(&live_arena);
   _ifg = &ifg;
 
-  if (C->unique() > _names.Size())  _names.extend(C->unique()-1, 0);
-
   // Come out of SSA world to the Named world.  Assign (virtual) registers to
   // Nodes.  Use the same register for all inputs and the output of PhiNodes
   // - effectively ending SSA form.  This requires either coalescing live
@@ -258,9 +392,9 @@
     _live = NULL;                 // Mark live as being not available
     rm.reset_to_mark();           // Reclaim working storage
     IndexSet::reset_memory(C, &live_arena);
-    ifg.init(_maxlrg);            // Empty IFG
+    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
     gather_lrg_masks( false );    // Collect LRG masks
-    live.compute( _maxlrg );      // Compute liveness
+    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
     _live = &live;                // Mark LIVE as being available
   }
 
@@ -270,19 +404,19 @@
   // across any GC point where the derived value is live.  So this code looks
   // at all the GC points, and "stretches" the live range of any base pointer
   // to the GC point.
-  if( stretch_base_pointer_live_ranges(&live_arena) ) {
-    NOT_PRODUCT( Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler); )
+  if (stretch_base_pointer_live_ranges(&live_arena)) {
+    NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
     // Since some live range stretched, I need to recompute live
     _live = NULL;
     rm.reset_to_mark();         // Reclaim working storage
     IndexSet::reset_memory(C, &live_arena);
-    ifg.init(_maxlrg);
-    gather_lrg_masks( false );
-    live.compute( _maxlrg );
+    ifg.init(_lrg_map.max_lrg_id());
+    gather_lrg_masks(false);
+    live.compute(_lrg_map.max_lrg_id());
     _live = &live;
   }
   // Create the interference graph using virtual copies
-  build_ifg_virtual( );  // Include stack slots this time
+  build_ifg_virtual();  // Include stack slots this time
 
   // Aggressive (but pessimistic) copy coalescing.
   // This pass works on virtual copies.  Any virtual copies which are not
@@ -296,8 +430,8 @@
     // given Node and search them for an instance, i.e., time O(#MaxLRG)).
     _ifg->SquareUp();
 
-    PhaseAggressiveCoalesce coalesce( *this );
-    coalesce.coalesce_driver( );
+    PhaseAggressiveCoalesce coalesce(*this);
+    coalesce.coalesce_driver();
     // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
     // not match the Phi itself, insert a copy.
     coalesce.insert_copies(_matcher);
@@ -310,28 +444,36 @@
     _live = NULL;
     rm.reset_to_mark();           // Reclaim working storage
     IndexSet::reset_memory(C, &live_arena);
-    ifg.init(_maxlrg);
+    ifg.init(_lrg_map.max_lrg_id());
     gather_lrg_masks( true );
-    live.compute( _maxlrg );
+    live.compute(_lrg_map.max_lrg_id());
     _live = &live;
   }
 
   // Build physical interference graph
   uint must_spill = 0;
-  must_spill = build_ifg_physical( &live_arena );
+  must_spill = build_ifg_physical(&live_arena);
   // If we have a guaranteed spill, might as well spill now
-  if( must_spill ) {
-    if( !_maxlrg ) return;
+  if (must_spill) {
+    if (!_lrg_map.max_lrg_id()) {
+      return;
+    }
     // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
     C->check_node_count(10*must_spill, "out of nodes before split");
-    if (C->failing())  return;
-    _maxlrg = Split(_maxlrg, &split_arena);  // Split spilling LRG everywhere
+    if (C->failing()) {
+      return;
+    }
+
+    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
+    _lrg_map.set_max_lrg_id(new_max_lrg_id);
     // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
     // or we failed to split
     C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
-    if (C->failing())  return;
+    if (C->failing()) {
+      return;
+    }
 
-    NOT_PRODUCT( C->verify_graph_edges(); )
+    NOT_PRODUCT(C->verify_graph_edges();)
 
     compact();                  // Compact LRGs; return new lower max lrg
 
@@ -340,23 +482,23 @@
       _live = NULL;
       rm.reset_to_mark();         // Reclaim working storage
       IndexSet::reset_memory(C, &live_arena);
-      ifg.init(_maxlrg);          // Build a new interference graph
+      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
       gather_lrg_masks( true );   // Collect intersect mask
-      live.compute( _maxlrg );    // Compute LIVE
+      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
       _live = &live;
     }
-    build_ifg_physical( &live_arena );
+    build_ifg_physical(&live_arena);
     _ifg->SquareUp();
     _ifg->Compute_Effective_Degree();
     // Only do conservative coalescing if requested
-    if( OptoCoalesce ) {
+    if (OptoCoalesce) {
       // Conservative (and pessimistic) copy coalescing of those spills
-      PhaseConservativeCoalesce coalesce( *this );
+      PhaseConservativeCoalesce coalesce(*this);
       // If max live ranges greater than cutoff, don't color the stack.
       // This cutoff can be larger than below since it is only done once.
-      coalesce.coalesce_driver( );
+      coalesce.coalesce_driver();
     }
-    compress_uf_map_for_nodes();
+    _lrg_map.compress_uf_map_for_nodes();
 
 #ifdef ASSERT
     verify(&live_arena, true);
@@ -390,13 +532,18 @@
       }
     }
 
-    if( !_maxlrg ) return;
-    _maxlrg = Split(_maxlrg, &split_arena);  // Split spilling LRG everywhere
+    if (!_lrg_map.max_lrg_id()) {
+      return;
+    }
+    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
+    _lrg_map.set_max_lrg_id(new_max_lrg_id);
     // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
-    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split");
-    if (C->failing())  return;
+    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
+    if (C->failing()) {
+      return;
+    }
 
-    compact();                  // Compact LRGs; return new lower max lrg
+    compact(); // Compact LRGs; return new lower max lrg
 
     // Nuke the live-ness and interference graph and LiveRanGe info
     {
@@ -404,26 +551,26 @@
       _live = NULL;
       rm.reset_to_mark();         // Reclaim working storage
       IndexSet::reset_memory(C, &live_arena);
-      ifg.init(_maxlrg);
+      ifg.init(_lrg_map.max_lrg_id());
 
       // Create LiveRanGe array.
       // Intersect register masks for all USEs and DEFs
-      gather_lrg_masks( true );
-      live.compute( _maxlrg );
+      gather_lrg_masks(true);
+      live.compute(_lrg_map.max_lrg_id());
       _live = &live;
     }
-    must_spill = build_ifg_physical( &live_arena );
+    must_spill = build_ifg_physical(&live_arena);
     _ifg->SquareUp();
     _ifg->Compute_Effective_Degree();
 
     // Only do conservative coalescing if requested
-    if( OptoCoalesce ) {
+    if (OptoCoalesce) {
       // Conservative (and pessimistic) copy coalescing
-      PhaseConservativeCoalesce coalesce( *this );
+      PhaseConservativeCoalesce coalesce(*this);
       // Check for few live ranges determines how aggressive coalesce is.
-      coalesce.coalesce_driver( );
+      coalesce.coalesce_driver();
     }
-    compress_uf_map_for_nodes();
+    _lrg_map.compress_uf_map_for_nodes();
 #ifdef ASSERT
     verify(&live_arena, true);
 #endif
@@ -435,7 +582,7 @@
 
     // Select colors by re-inserting LRGs back into the IFG in reverse order.
     // Return whether or not something spills.
-    spills = Select( );
+    spills = Select();
   }
 
   // Count number of Simplify-Select trips per coloring success.
@@ -452,9 +599,12 @@
 
   // max_reg is past the largest *register* used.
   // Convert that to a frame_slot number.
-  if( _max_reg <= _matcher._new_SP )
+  if (_max_reg <= _matcher._new_SP) {
     _framesize = C->out_preserve_stack_slots();
-  else _framesize = _max_reg -_matcher._new_SP;
+  }
+  else {
+    _framesize = _max_reg - _matcher._new_SP;
+  }
   assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
 
   // This frame must preserve the required fp alignment
@@ -462,8 +612,9 @@
   assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
 #ifndef PRODUCT
   _total_framesize += _framesize;
-  if( (int)_framesize > _max_framesize )
+  if ((int)_framesize > _max_framesize) {
     _max_framesize = _framesize;
+  }
 #endif
 
   // Convert CISC spills
@@ -475,15 +626,17 @@
     log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
   }
 
-  if (C->failing())  return;
+  if (C->failing()) {
+    return;
+  }
 
-  NOT_PRODUCT( C->verify_graph_edges(); )
+  NOT_PRODUCT(C->verify_graph_edges();)
 
   // Move important info out of the live_arena to longer lasting storage.
-  alloc_node_regs(_names.Size());
-  for (uint i=0; i < _names.Size(); i++) {
-    if (_names[i]) {           // Live range associated with Node?
-      LRG &lrg = lrgs(_names[i]);
+  alloc_node_regs(_lrg_map.size());
+  for (uint i = 0; i < _lrg_map.size(); i++) {
+    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
+      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
       if (!lrg.alive()) {
         set_bad(i);
       } else if (lrg.num_regs() == 1) {
@@ -537,11 +690,11 @@
       Node *n = b->_nodes[j];
       // Pre-color to the zero live range, or pick virtual register
       const RegMask &rm = n->out_RegMask();
-      _names.map( n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0 );
+      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
     }
   }
   // Reset the Union-Find mapping to be identity
-  reset_uf_map(lr_counter);
+  _lrg_map.reset_uf_map(lr_counter);
 }
 
 
@@ -551,7 +704,7 @@
 void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
 
   // Nail down the frame pointer live range
-  uint fp_lrg = n2lidx(_cfg._root->in(1)->in(TypeFunc::FramePtr));
+  uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr));
   lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite
 
   // For all blocks
@@ -566,14 +719,14 @@
       uint idx = n->is_Copy();
 
       // Get virtual register number, same as LiveRanGe index
-      uint vreg = n2lidx(n);
+      uint vreg = _lrg_map.live_range_id(n);
       LRG &lrg = lrgs(vreg);
       if( vreg ) {              // No vreg means un-allocable (e.g. memory)
 
         // Collect has-copy bit
         if( idx ) {
           lrg._has_copy = 1;
-          uint clidx = n2lidx(n->in(idx));
+          uint clidx = _lrg_map.live_range_id(n->in(idx));
           LRG &copy_src = lrgs(clidx);
           copy_src._has_copy = 1;
         }
@@ -773,8 +926,10 @@
       }
       // Prepare register mask for each input
       for( uint k = input_edge_start; k < cnt; k++ ) {
-        uint vreg = n2lidx(n->in(k));
-        if( !vreg ) continue;
+        uint vreg = _lrg_map.live_range_id(n->in(k));
+        if (!vreg) {
+          continue;
+        }
 
         // If this instruction is CISC Spillable, add the flags
         // bit to its appropriate input
@@ -857,7 +1012,7 @@
   } // end for all blocks
 
   // Final per-liverange setup
-  for (uint i2=0; i2<_maxlrg; i2++) {
+  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
     LRG &lrg = lrgs(i2);
     assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
     if (lrg.num_regs() > 1 && !lrg._fat_proj) {
@@ -879,7 +1034,7 @@
 // The bit is checked in Simplify.
 void PhaseChaitin::set_was_low() {
 #ifdef ASSERT
-  for( uint i = 1; i < _maxlrg; i++ ) {
+  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
     int size = lrgs(i).num_regs();
     uint old_was_lo = lrgs(i)._was_lo;
     lrgs(i)._was_lo = 0;
@@ -913,7 +1068,7 @@
 // Compute cost/area ratio, in case we spill.  Build the lo-degree list.
 void PhaseChaitin::cache_lrg_info( ) {
 
-  for( uint i = 1; i < _maxlrg; i++ ) {
+  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
     LRG &lrg = lrgs(i);
 
     // Check for being of low degree: means we can be trivially colored.
@@ -949,10 +1104,10 @@
 
   // Warm up the lo-degree no-copy list
   int lo_no_copy = 0;
-  for( uint i = 1; i < _maxlrg; i++ ) {
-    if( (lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
+  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
+    if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
         !lrgs(i).alive() ||
-        lrgs(i)._must_spill ) {
+        lrgs(i)._must_spill) {
       lrgs(i)._next = lo_no_copy;
       lo_no_copy = i;
     }
@@ -1163,7 +1318,7 @@
 OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
 
   // Check for "at_risk" LRG's
-  uint risk_lrg = Find(lrg._risk_bias);
+  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
   if( risk_lrg != 0 ) {
     // Walk the colored neighbors of the "at_risk" candidate
     // Choose a color which is both legal and already taken by a neighbor
@@ -1179,7 +1334,7 @@
     }
   }
 
-  uint copy_lrg = Find(lrg._copy_bias);
+  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
   if( copy_lrg != 0 ) {
     // If he has a color,
     if( !(*(_ifg->_yanked))[copy_lrg] ) {
@@ -1423,10 +1578,10 @@
 void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
   if( _spilled_once.test(src->_idx) ) {
     _spilled_once.set(dst->_idx);
-    lrgs(Find(dst))._was_spilled1 = 1;
+    lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
     if( _spilled_twice.test(src->_idx) ) {
       _spilled_twice.set(dst->_idx);
-      lrgs(Find(dst))._was_spilled2 = 1;
+      lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
     }
   }
 }
@@ -1471,7 +1626,7 @@
         MachNode *mach = n->as_Mach();
         inp = mach->operand_index(inp);
         Node *src = n->in(inp);   // Value to load or store
-        LRG &lrg_cisc = lrgs( Find_const(src) );
+        LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
         OptoReg::Name src_reg = lrg_cisc.reg();
         // Doubles record the HIGH register of an adjacent pair.
         src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
@@ -1554,9 +1709,9 @@
       Block *startb = _cfg._bbs[C->top()->_idx];
       startb->_nodes.insert(startb->find_node(C->top()), base );
       _cfg._bbs.map( base->_idx, startb );
-      assert (n2lidx(base) == 0, "should not have LRG yet");
+      assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
     }
-    if (n2lidx(base) == 0) {
+    if (_lrg_map.live_range_id(base) == 0) {
       new_lrg(base, maxlrg++);
     }
     assert(base->in(0) == _cfg._root &&
@@ -1566,7 +1721,7 @@
   }
 
   // Check for AddP-related opcodes
-  if( !derived->is_Phi() ) {
+  if (!derived->is_Phi()) {
     assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
     Node *base = derived->in(AddPNode::Base);
     derived_base_map[derived->_idx] = base;
@@ -1629,9 +1784,9 @@
 // base pointer that is live across the Safepoint for oopmap building.  The
 // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
 // required edge set.
-bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) {
+bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
   int must_recompute_live = false;
-  uint maxlrg = _maxlrg;
+  uint maxlrg = _lrg_map.max_lrg_id();
   Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
   memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
 
@@ -1669,15 +1824,18 @@
       }
 
       // Get value being defined
-      uint lidx = n2lidx(n);
-      if( lidx && lidx < _maxlrg /* Ignore the occasional brand-new live range */) {
+      uint lidx = _lrg_map.live_range_id(n);
+      // Ignore the occasional brand-new live range
+      if (lidx && lidx < _lrg_map.max_lrg_id()) {
         // Remove from live-out set
         liveout.remove(lidx);
 
         // Copies do not define a new value and so do not interfere.
         // Remove the copies source from the liveout set before interfering.
         uint idx = n->is_Copy();
-        if( idx ) liveout.remove( n2lidx(n->in(idx)) );
+        if (idx) {
+          liveout.remove(_lrg_map.live_range_id(n->in(idx)));
+        }
       }
 
       // Found a safepoint?
@@ -1695,21 +1853,21 @@
                   derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
           // If its an OOP with a non-zero offset, then it is derived.
           if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
-            Node *base = find_base_for_derived( derived_base_map, derived, maxlrg );
-            assert( base->_idx < _names.Size(), "" );
+            Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
+            assert(base->_idx < _lrg_map.size(), "");
             // Add reaching DEFs of derived pointer and base pointer as a
             // pair of inputs
-            n->add_req( derived );
-            n->add_req( base );
+            n->add_req(derived);
+            n->add_req(base);
 
             // See if the base pointer is already live to this point.
             // Since I'm working on the SSA form, live-ness amounts to
             // reaching def's.  So if I find the base's live range then
             // I know the base's def reaches here.
-            if( (n2lidx(base) >= _maxlrg ||// (Brand new base (hence not live) or
-                 !liveout.member( n2lidx(base) ) ) && // not live) AND
-                 (n2lidx(base) > 0)                && // not a constant
-                 _cfg._bbs[base->_idx] != b ) {     //  base not def'd in blk)
+            if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
+                 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
+                 (_lrg_map.live_range_id(base) > 0) && // not a constant
+                 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
               // Base pointer is not currently live.  Since I stretched
               // the base pointer to here and it crosses basic-block
               // boundaries, the global live info is now incorrect.
@@ -1721,11 +1879,12 @@
       } // End of if found a GC point
 
       // Make all inputs live
-      if( !n->is_Phi() ) {      // Phi function uses come from prior block
-        for( uint k = 1; k < n->req(); k++ ) {
-          uint lidx = n2lidx(n->in(k));
-          if( lidx < _maxlrg )
-            liveout.insert( lidx );
+      if (!n->is_Phi()) {      // Phi function uses come from prior block
+        for (uint k = 1; k < n->req(); k++) {
+          uint lidx = _lrg_map.live_range_id(n->in(k));
+          if (lidx < _lrg_map.max_lrg_id()) {
+            liveout.insert(lidx);
+          }
         }
       }
 
@@ -1733,11 +1892,12 @@
     liveout.clear();  // Free the memory used by liveout.
 
   } // End of forall blocks
-  _maxlrg = maxlrg;
+  _lrg_map.set_max_lrg_id(maxlrg);
 
   // If I created a new live range I need to recompute live
-  if( maxlrg != _ifg->_maxlrg )
+  if (maxlrg != _ifg->_maxlrg) {
     must_recompute_live = true;
+  }
 
   return must_recompute_live != 0;
 }
@@ -1745,16 +1905,17 @@
 
 //------------------------------add_reference----------------------------------
 // Extend the node to LRG mapping
-void PhaseChaitin::add_reference( const Node *node, const Node *old_node ) {
-  _names.extend( node->_idx, n2lidx(old_node) );
+
+void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
+  _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
 }
 
 //------------------------------dump-------------------------------------------
 #ifndef PRODUCT
-void PhaseChaitin::dump( const Node *n ) const {
-  uint r = (n->_idx < _names.Size() ) ? Find_const(n) : 0;
+void PhaseChaitin::dump(const Node *n) const {
+  uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
   tty->print("L%d",r);
-  if( r && n->Opcode() != Op_Phi ) {
+  if (r && n->Opcode() != Op_Phi) {
     if( _node_regs ) {          // Got a post-allocation copy of allocation?
       tty->print("[");
       OptoReg::Name second = get_reg_second(n);
@@ -1775,11 +1936,13 @@
   tty->print("/N%d\t",n->_idx);
   tty->print("%s === ", n->Name());
   uint k;
-  for( k = 0; k < n->req(); k++) {
+  for (k = 0; k < n->req(); k++) {
     Node *m = n->in(k);
-    if( !m ) tty->print("_ ");
+    if (!m) {
+      tty->print("_ ");
+    }
     else {
-      uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0;
+      uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
       tty->print("L%d",r);
       // Data MultiNode's can have projections with no real registers.
       // Don't die while dumping them.
@@ -1810,8 +1973,10 @@
   if( k < n->len() && n->in(k) ) tty->print("| ");
   for( ; k < n->len(); k++ ) {
     Node *m = n->in(k);
-    if( !m ) break;
-    uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0;
+    if (!m) {
+      break;
+    }
+    uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
     tty->print("L%d",r);
     tty->print("/N%d ",m->_idx);
   }
@@ -1839,7 +2004,7 @@
     tty->print("{");
     uint i;
     while ((i = elements.next()) != 0) {
-      tty->print("L%d ", Find_const(i));
+      tty->print("L%d ", _lrg_map.find_const(i));
     }
     tty->print_cr("}");
   }
@@ -1863,10 +2028,14 @@
 
   // Dump LRG array
   tty->print("--- Live RanGe Array ---\n");
-  for(uint i2 = 1; i2 < _maxlrg; i2++ ) {
+  for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
     tty->print("L%d: ",i2);
-    if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( );
-    else tty->print_cr("new LRG");
+    if (i2 < _ifg->_maxlrg) {
+      lrgs(i2).dump();
+    }
+    else {
+      tty->print_cr("new LRG");
+    }
   }
   tty->print_cr("");
 
@@ -1939,7 +2108,7 @@
     // Post allocation, use direct mappings, no LRG info available
     print_reg( get_reg_first(n), this, buf );
   } else {
-    uint lidx = Find_const(n); // Grab LRG number
+    uint lidx = _lrg_map.find_const(n); // Grab LRG number
     if( !_ifg ) {
       sprintf(buf,"L%d",lidx);  // No register binding yet
     } else if( !lidx ) {        // Special, not allocated value
@@ -1968,7 +2137,7 @@
   if( WizardMode && (PrintCompilation || PrintOpto) ) {
     // Display which live ranges need to be split and the allocator's state
     tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
-    for( uint bidx = 1; bidx < _maxlrg; bidx++ ) {
+    for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
       if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
         tty->print("L%d: ", bidx);
         lrgs(bidx).dump();
@@ -2099,14 +2268,17 @@
 void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
   tty->print_cr("---dump of L%d---",lidx);
 
-  if( _ifg ) {
-    if( lidx >= _maxlrg ) {
+  if (_ifg) {
+    if (lidx >= _lrg_map.max_lrg_id()) {
       tty->print("Attempt to print live range index beyond max live range.\n");
       return;
     }
     tty->print("L%d: ",lidx);
-    if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( );
-    else tty->print_cr("new LRG");
+    if (lidx < _ifg->_maxlrg) {
+      lrgs(lidx).dump();
+    } else {
+      tty->print_cr("new LRG");
+    }
   }
   if( _ifg && lidx < _ifg->_maxlrg) {
     tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
@@ -2121,8 +2293,8 @@
     // For all instructions
     for( uint j = 0; j < b->_nodes.size(); j++ ) {
       Node *n = b->_nodes[j];
-      if( Find_const(n) == lidx ) {
-        if( !dump_once++ ) {
+      if (_lrg_map.find_const(n) == lidx) {
+        if (!dump_once++) {
           tty->cr();
           b->dump_head( &_cfg._bbs );
         }
@@ -2133,11 +2305,13 @@
         uint cnt = n->req();
         for( uint k = 1; k < cnt; k++ ) {
           Node *m = n->in(k);
-          if (!m)  continue;  // be robust in the dumper
-          if( Find_const(m) == lidx ) {
-            if( !dump_once++ ) {
+          if (!m) {
+            continue;  // be robust in the dumper
+          }
+          if (_lrg_map.find_const(m) == lidx) {
+            if (!dump_once++) {
               tty->cr();
-              b->dump_head( &_cfg._bbs );
+              b->dump_head(&_cfg._bbs);
             }
             dump(n);
           }