8022284: Hide internal data structure in PhaseCFG
author adlertz
Wed, 07 Aug 2013 17:56:19 +0200
changeset 19279 4be3c2e6663c
parent 19160 6731e700c220
child 19280 17dd83a6dd79
8022284: Hide internal data structure in PhaseCFG
Summary: Hide private node to block mapping using public interface
Reviewed-by: kvn, roland
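
The change is mechanical: every direct read or write of the public `_bbs` array (indexed by `Node::_idx`) is routed through a small accessor interface on `PhaseCFG`, and the array itself becomes the private member `_node_to_block_mapping`. The standalone sketch below models the shape of that interface; the simplified `Node`, `Block`, and `Block_Array` types are stand-ins for the real HotSpot classes, and only the four accessor names and the private field mirror what this patch adds to block.hpp.

#include <cassert>
#include <cstddef>
#include <vector>

// Simplified stand-ins for the HotSpot types (illustrative only).
struct Node  { unsigned _idx; };
struct Block { };

// Minimal model of Block_Array: a growable Node-index -> Block* map.
class Block_Array {
  std::vector<Block*> _blocks;
public:
  void map(unsigned idx, Block* b) {
    if (idx >= _blocks.size()) _blocks.resize(idx + 1, NULL);
    _blocks[idx] = b;
  }
  Block* lookup(unsigned idx) const {
    return idx < _blocks.size() ? _blocks[idx] : NULL;
  }
  Block* operator[](unsigned idx) const { return lookup(idx); }
};

// After the patch the mapping is private and callers go through the
// four accessors added to PhaseCFG in block.hpp.
class PhaseCFG {
private:
  Block_Array _node_to_block_mapping;   // was the public field _bbs
public:
  void map_node_to_block(const Node* node, Block* block) {
    _node_to_block_mapping.map(node->_idx, block);
  }
  void unmap_node_from_block(const Node* node) {
    _node_to_block_mapping.map(node->_idx, NULL);
  }
  Block* get_block_for_node(const Node* node) const {
    return _node_to_block_mapping[node->_idx];
  }
  bool has_block(const Node* node) const {
    return _node_to_block_mapping.lookup(node->_idx) != NULL;
  }
};

int main() {
  PhaseCFG cfg;
  Node n; n._idx = 42;
  Block b;
  // Old call sites read cfg._bbs[n._idx] directly; new ones use the interface.
  cfg.map_node_to_block(&n, &b);
  assert(cfg.has_block(&n) && cfg.get_block_for_node(&n) == &b);
  cfg.unmap_node_from_block(&n);
  assert(!cfg.has_block(&n));
  return 0;
}
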
hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java
hotspot/src/share/vm/opto/block.cpp
hotspot/src/share/vm/opto/block.hpp
hotspot/src/share/vm/opto/buildOopMap.cpp
hotspot/src/share/vm/opto/chaitin.cpp
hotspot/src/share/vm/opto/coalesce.cpp
hotspot/src/share/vm/opto/compile.cpp
hotspot/src/share/vm/opto/domgraph.cpp
hotspot/src/share/vm/opto/gcm.cpp
hotspot/src/share/vm/opto/idealGraphPrinter.cpp
hotspot/src/share/vm/opto/ifg.cpp
hotspot/src/share/vm/opto/lcm.cpp
hotspot/src/share/vm/opto/live.cpp
hotspot/src/share/vm/opto/node.hpp
hotspot/src/share/vm/opto/output.cpp
hotspot/src/share/vm/opto/output.hpp
hotspot/src/share/vm/opto/postaloc.cpp
hotspot/src/share/vm/opto/reg_split.cpp
hotspot/src/share/vm/runtime/vmStructs.cpp
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Wed Aug 07 17:56:19 2013 +0200
@@ -44,7 +44,7 @@
     Type type      = db.lookupType("PhaseCFG");
     numBlocksField = new CIntField(type.getCIntegerField("_num_blocks"), 0);
     blocksField = type.getAddressField("_blocks");
-    bbsField = type.getAddressField("_bbs");
+    bbsField = type.getAddressField("_node_to_block_mapping");
     brootField = type.getAddressField("_broot");
   }
 
--- a/hotspot/src/share/vm/opto/block.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/block.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -221,7 +221,7 @@
 //------------------------------is_uncommon------------------------------------
 // True if block is low enough frequency or guarded by a test which
 // mostly does not go here.
-bool Block::is_uncommon( Block_Array &bbs ) const {
+bool Block::is_uncommon(PhaseCFG* cfg) const {
   // Initial blocks must never be moved, so are never uncommon.
   if (head()->is_Root() || head()->is_Start())  return false;
 
@@ -238,7 +238,7 @@
   uint uncommon_for_freq_preds = 0;
 
   for( uint i=1; i<num_preds(); i++ ) {
-    Block* guard = bbs[pred(i)->_idx];
+    Block* guard = cfg->get_block_for_node(pred(i));
     // Check to see if this block follows its guard 1 time out of 10000
     // or less.
     //
@@ -285,11 +285,11 @@
   }
 }
 
-void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
+void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
   if (is_connector()) {
     for (uint i=1; i<num_preds(); i++) {
-      Block *p = ((*bbs)[pred(i)->_idx]);
-      p->dump_pred(bbs, orig, st);
+      Block *p = cfg->get_block_for_node(pred(i));
+      p->dump_pred(cfg, orig, st);
     }
   } else {
     dump_bidx(orig, st);
@@ -297,7 +297,7 @@
   }
 }
 
-void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
+void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
   // Print the basic block
   dump_bidx(this, st);
   st->print(": #\t");
@@ -311,26 +311,28 @@
   if( head()->is_block_start() ) {
     for (uint i=1; i<num_preds(); i++) {
       Node *s = pred(i);
-      if (bbs) {
-        Block *p = (*bbs)[s->_idx];
-        p->dump_pred(bbs, p, st);
+      if (cfg != NULL) {
+        Block *p = cfg->get_block_for_node(s);
+        p->dump_pred(cfg, p, st);
       } else {
         while (!s->is_block_start())
           s = s->in(0);
         st->print("N%d ", s->_idx );
       }
     }
-  } else
+  } else {
     st->print("BLOCK HEAD IS JUNK  ");
+  }
 
   // Print loop, if any
   const Block *bhead = this;    // Head of self-loop
   Node *bh = bhead->head();
-  if( bbs && bh->is_Loop() && !head()->is_Root() ) {
+
+  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
     LoopNode *loop = bh->as_Loop();
-    const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
+    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
     while (bx->is_connector()) {
-      bx = (*bbs)[bx->pred(1)->_idx];
+      bx = cfg->get_block_for_node(bx->pred(1));
     }
     st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
     // Dump any loop-specific bits, especially for CountedLoops.
@@ -349,29 +351,32 @@
   st->print_cr("");
 }
 
-void Block::dump() const { dump(NULL); }
+void Block::dump() const {
+  dump(NULL);
+}
 
-void Block::dump( const Block_Array *bbs ) const {
-  dump_head(bbs);
-  uint cnt = _nodes.size();
-  for( uint i=0; i<cnt; i++ )
+void Block::dump(const PhaseCFG* cfg) const {
+  dump_head(cfg);
+  for (uint i=0; i< _nodes.size(); i++) {
     _nodes[i]->dump();
+  }
   tty->print("\n");
 }
 #endif
 
 //=============================================================================
 //------------------------------PhaseCFG---------------------------------------
-PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
-  Phase(CFG),
-  _bbs(a),
-  _root(r),
-  _node_latency(NULL)
+PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
+: Phase(CFG)
+, _block_arena(arena)
+, _node_to_block_mapping(arena)
+, _root(root)
+, _node_latency(NULL)
 #ifndef PRODUCT
-  , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
+, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
 #endif
 #ifdef ASSERT
-  , _raw_oops(a)
+, _raw_oops(arena)
 #endif
 {
   ResourceMark rm;
@@ -380,13 +385,13 @@
   // Node on demand.
   Node *x = new (C) GotoNode(NULL);
   x->init_req(0, x);
-  _goto = m.match_tree(x);
+  _goto = matcher.match_tree(x);
   assert(_goto != NULL, "");
   _goto->set_req(0,_goto);
 
   // Build the CFG in Reverse Post Order
   _num_blocks = build_cfg();
-  _broot = _bbs[_root->_idx];
+  _broot = get_block_for_node(_root);
 }
 
 //------------------------------build_cfg--------------------------------------
@@ -440,9 +445,9 @@
       // 'p' now points to the start of this basic block
 
       // Put self in array of basic blocks
-      Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
-      _bbs.map(p->_idx,bb);
-      _bbs.map(x->_idx,bb);
+      Block *bb = new (_block_arena) Block(_block_arena, p);
+      map_node_to_block(p, bb);
+      map_node_to_block(x, bb);
       if( x != p ) {                // Only for root is x == p
         bb->_nodes.push((Node*)x);
       }
@@ -473,16 +478,16 @@
       // Check if it the fist node pushed on stack at the beginning.
       if (idx == 0) break;          // end of the build
       // Find predecessor basic block
-      Block *pb = _bbs[x->_idx];
+      Block *pb = get_block_for_node(x);
       // Insert into nodes array, if not already there
-      if( !_bbs.lookup(proj->_idx) ) {
+      if (!has_block(proj)) {
         assert( x != proj, "" );
         // Map basic block of projection
-        _bbs.map(proj->_idx,pb);
+        map_node_to_block(proj, pb);
         pb->_nodes.push(proj);
       }
       // Insert self as a child of my predecessor block
-      pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
+      pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
       assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
               "too many control users, not a CFG?" );
     }
@@ -511,15 +516,15 @@
   RegionNode* region = new (C) RegionNode(2);
   region->init_req(1, proj);
   // setup corresponding basic block
-  Block* block = new (_bbs._arena) Block(_bbs._arena, region);
-  _bbs.map(region->_idx, block);
+  Block* block = new (_block_arena) Block(_block_arena, region);
+  map_node_to_block(region, block);
   C->regalloc()->set_bad(region->_idx);
   // add a goto node
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, region);
   // add it to the basic block
   block->_nodes.push(gto);
-  _bbs.map(gto->_idx, block);
+  map_node_to_block(gto, block);
   C->regalloc()->set_bad(gto->_idx);
   // hook up successor block
   block->_succs.map(block->_num_succs++, out);
@@ -570,7 +575,7 @@
   gto->set_req(0, b->head());
   Node *bp = b->_nodes[end_idx];
   b->_nodes.map(end_idx,gto); // Slam over NeverBranch
-  _bbs.map(gto->_idx, b);
+  map_node_to_block(gto, b);
   C->regalloc()->set_bad(gto->_idx);
   b->_nodes.pop();              // Yank projections
   b->_nodes.pop();              // Yank projections
@@ -613,7 +618,7 @@
   // If the previous block conditionally falls into bx, return false,
   // because moving bx will create an extra jump.
   for(uint k = 1; k < bx->num_preds(); k++ ) {
-    Block* pred = _bbs[bx->pred(k)->_idx];
+    Block* pred = get_block_for_node(bx->pred(k));
     if (pred == _blocks[bx_index-1]) {
       if (pred->_num_succs != 1) {
         return false;
@@ -682,7 +687,7 @@
 
     // Look for uncommon blocks and move to end.
     if (!C->do_freq_based_layout()) {
-      if( b->is_uncommon(_bbs) ) {
+      if (b->is_uncommon(this)) {
         move_to_end(b, i);
         last--;                   // No longer check for being uncommon!
         if( no_flip_branch(b) ) { // Fall-thru case must follow?
@@ -870,28 +875,31 @@
   } while( !p->is_block_start() );
 
   // Recursively visit
-  for( uint i=1; i<p->req(); i++ )
-    _dump_cfg(p->in(i),visited);
+  for (uint i = 1; i < p->req(); i++) {
+    _dump_cfg(p->in(i), visited);
+  }
 
   // Dump the block
-  _bbs[p->_idx]->dump(&_bbs);
+  get_block_for_node(p)->dump(this);
 }
 
 void PhaseCFG::dump( ) const {
   tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
-  if( _blocks.size() ) {        // Did we do basic-block layout?
-    for( uint i=0; i<_num_blocks; i++ )
-      _blocks[i]->dump(&_bbs);
+  if (_blocks.size()) {        // Did we do basic-block layout?
+    for (uint i = 0; i < _num_blocks; i++) {
+      _blocks[i]->dump(this);
+    }
   } else {                      // Else do it with a DFS
-    VectorSet visited(_bbs._arena);
+    VectorSet visited(_block_arena);
     _dump_cfg(_root,visited);
   }
 }
 
 void PhaseCFG::dump_headers() {
   for( uint i = 0; i < _num_blocks; i++ ) {
-    if( _blocks[i] == NULL ) continue;
-    _blocks[i]->dump_head(&_bbs);
+    if (_blocks[i]) {
+      _blocks[i]->dump_head(this);
+    }
   }
 }
 
@@ -904,7 +912,7 @@
     uint j;
     for (j = 0; j < cnt; j++)  {
       Node *n = b->_nodes[j];
-      assert( _bbs[n->_idx] == b, "" );
+      assert(get_block_for_node(n) == b, "");
       if (j >= 1 && n->is_Mach() &&
           n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
         assert(j == 1 || b->_nodes[j-1]->is_Phi(),
@@ -913,13 +921,12 @@
       for (uint k = 0; k < n->req(); k++) {
         Node *def = n->in(k);
         if (def && def != n) {
-          assert(_bbs[def->_idx] || def->is_Con(),
-                 "must have block; constants for debug info ok");
+          assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
           // Verify that instructions in the block is in correct order.
           // Uses must follow their definition if they are at the same block.
           // Mostly done to check that MachSpillCopy nodes are placed correctly
           // when CreateEx node is moved in build_ifg_physical().
-          if (_bbs[def->_idx] == b &&
+          if (get_block_for_node(def) == b &&
               !(b->head()->is_Loop() && n->is_Phi()) &&
               // See (+++) comment in reg_split.cpp
               !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
--- a/hotspot/src/share/vm/opto/block.hpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/block.hpp	Wed Aug 07 17:56:19 2013 +0200
@@ -48,13 +48,12 @@
   friend class VMStructs;
   uint _size;                   // allocated size, as opposed to formal limit
   debug_only(uint _limit;)      // limit to formal domain
+  Arena *_arena;                // Arena to allocate in
 protected:
   Block **_blocks;
   void grow( uint i );          // Grow array node to fit
 
 public:
-  Arena *_arena;                // Arena to allocate in
-
   Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
     debug_only(_limit=0);
     _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
@@ -77,7 +76,7 @@
 public:
   uint _cnt;
   Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
-  void push( Block *b ) { map(_cnt++,b); }
+  void push( Block *b ) {  map(_cnt++,b); }
   Block *pop() { return _blocks[--_cnt]; }
   Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
   void remove( uint i );
@@ -284,15 +283,15 @@
   // helper function that adds caller save registers to MachProjNode
   void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
+  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
 
   // Perform basic-block local scheduling
   Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
-  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
-  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
+  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
+  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
   bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
   // Cleanup if any code lands between a Call and his Catch
-  void call_catch_cleanup(Block_Array &bbs, Compile *C);
+  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
   // Detect implicit-null-check opportunities.  Basically, find NULL checks
   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
   // I can generate a memory op if there is not one nearby.
@@ -331,15 +330,15 @@
 
   // Use frequency calculations and code shape to predict if the block
   // is uncommon.
-  bool is_uncommon( Block_Array &bbs ) const;
+  bool is_uncommon(PhaseCFG* cfg) const;
 
 #ifndef PRODUCT
   // Debugging print of basic block
   void dump_bidx(const Block* orig, outputStream* st = tty) const;
-  void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const;
-  void dump_head( const Block_Array *bbs, outputStream* st = tty ) const;
+  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
+  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
   void dump() const;
-  void dump( const Block_Array *bbs ) const;
+  void dump(const PhaseCFG* cfg) const;
 #endif
 };
 
@@ -349,6 +348,12 @@
 class PhaseCFG : public Phase {
   friend class VMStructs;
  private:
+  // Arena for the blocks to be stored in
+  Arena* _block_arena;
+
+  // Map nodes to owning basic block
+  Block_Array _node_to_block_mapping;
+
   // Build a proper looking cfg.  Return count of basic blocks
   uint build_cfg();
 
@@ -371,22 +376,42 @@
 
   Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
   void verify_anti_dependences(Block* LCA, Node* load) {
-    assert(LCA == _bbs[load->_idx], "should already be scheduled");
+    assert(LCA == get_block_for_node(load), "should already be scheduled");
     insert_anti_dependences(LCA, load, true);
   }
 
  public:
-  PhaseCFG( Arena *a, RootNode *r, Matcher &m );
+  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);
 
   uint _num_blocks;             // Count of basic blocks
   Block_List _blocks;           // List of basic blocks
   RootNode *_root;              // Root of whole program
-  Block_Array _bbs;             // Map Nodes to owning Basic Block
   Block *_broot;                // Basic block of root
   uint _rpo_ctr;
   CFGLoop* _root_loop;
   float _outer_loop_freq;       // Outmost loop frequency
 
+
+  // set which block this node should reside in
+  void map_node_to_block(const Node* node, Block* block) {
+    _node_to_block_mapping.map(node->_idx, block);
+  }
+
+  // removes the mapping from a node to a block
+  void unmap_node_from_block(const Node* node) {
+    _node_to_block_mapping.map(node->_idx, NULL);
+  }
+
+  // get the block in which this node resides
+  Block* get_block_for_node(const Node* node) const {
+    return _node_to_block_mapping[node->_idx];
+  }
+
+  // does this node reside in a block; return true
+  bool has_block(const Node* node) const {
+    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
+  }
+
   // Per node latency estimation, valid only during GCM
   GrowableArray<uint> *_node_latency;
 
@@ -405,7 +430,7 @@
   void Estimate_Block_Frequency();
 
   // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
-  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
+  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
   void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
 
   // Compute the (backwards) latency of a node from the uses
@@ -454,7 +479,7 @@
   // Insert a node into a block, and update the _bbs
   void insert( Block *b, uint idx, Node *n ) {
     b->_nodes.insert( idx, n );
-    _bbs.map( n->_idx, b );
+    map_node_to_block(n, b);
   }
 
 #ifndef PRODUCT
@@ -543,7 +568,7 @@
     _child(NULL),
     _exit_prob(1.0f) {}
   CFGLoop* parent() { return _parent; }
-  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
+  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
   void add_member(CFGElement *s) { _members.push(s); }
   void add_nested_loop(CFGLoop* cl);
   Block* head() {
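
The per-file diffs that follow are the call-site rewrites implied by that interface. A minimal sketch of the dominant pattern (the helper and types below are hypothetical; only the accessor call mirrors the patch):

#include <cstdio>

// Illustrative stand-ins, not the HotSpot declarations.
struct Node  { unsigned _idx; Node* _pred; };
struct Block { unsigned _pre_order; };

class PhaseCFG {
  Block** _node_to_block_mapping;                 // private after this change
public:
  explicit PhaseCFG(Block** mapping) : _node_to_block_mapping(mapping) {}
  Block* get_block_for_node(const Node* node) const {
    return _node_to_block_mapping[node->_idx];
  }
};

// Before the patch: Block* guard = bbs[pred->_idx];
// After the patch:  Block* guard = cfg->get_block_for_node(pred);
void print_pred_block(const PhaseCFG* cfg, const Node* n) {
  Block* guard = cfg->get_block_for_node(n->_pred);
  printf("B%u\n", guard->_pre_order);
}

int main() {
  Block b0 = { 1 };
  Block b1 = { 2 };
  Block* mapping[2] = { &b0, &b1 };
  PhaseCFG cfg(mapping);
  Node pred = { 0, 0 };
  Node use  = { 1, &pred };
  print_pred_block(&cfg, &use);   // prints "B1": the predecessor's block
  return 0;
}
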
--- a/hotspot/src/share/vm/opto/buildOopMap.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/buildOopMap.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -426,14 +426,16 @@
   }
   memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
   // Push preds onto worklist
-  for( uint i=1; i<root->req(); i++ )
-    worklist->push(cfg->_bbs[root->in(i)->_idx]);
+  for (uint i = 1; i < root->req(); i++) {
+    Block* block = cfg->get_block_for_node(root->in(i));
+    worklist->push(block);
+  }
 
   // ZKM.jar includes tiny infinite loops which are unreached from below.
   // If we missed any blocks, we'll retry here after pushing all missed
   // blocks on the worklist.  Normally this outer loop never trips more
   // than once.
-  while( 1 ) {
+  while (1) {
 
     while( worklist->size() ) { // Standard worklist algorithm
       Block *b = worklist->rpop();
@@ -537,8 +539,10 @@
         for( l=0; l<max_reg_ints; l++ )
           old_live[l] = tmp_live[l];
         // Push preds onto worklist
-        for( l=1; l<(int)b->num_preds(); l++ )
-          worklist->push(cfg->_bbs[b->pred(l)->_idx]);
+        for (l = 1; l < (int)b->num_preds(); l++) {
+          Block* block = cfg->get_block_for_node(b->pred(l));
+          worklist->push(block);
+        }
       }
     }
 
@@ -629,10 +633,9 @@
     // pred to this block.  Otherwise we have to grab a new OopFlow.
     OopFlow *flow = NULL;       // Flag for finding optimized flow
     Block *pred = (Block*)0xdeadbeef;
-    uint j;
     // Scan this block's preds to find a done predecessor
-    for( j=1; j<b->num_preds(); j++ ) {
-      Block *p = _cfg->_bbs[b->pred(j)->_idx];
+    for (uint j = 1; j < b->num_preds(); j++) {
+      Block* p = _cfg->get_block_for_node(b->pred(j));
       OopFlow *p_flow = flows[p->_pre_order];
       if( p_flow ) {            // Predecessor is done
         assert( p_flow->_b == p, "cross check" );
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -295,7 +295,7 @@
 
 
 bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
-  Block *bcon = _cfg._bbs[con->_idx];
+  Block* bcon = _cfg.get_block_for_node(con);
   uint cindex = bcon->find_node(con);
   Node *con_next = bcon->_nodes[cindex+1];
   if (con_next->in(0) != con || !con_next->is_MachProj()) {
@@ -306,7 +306,7 @@
   Node *kills = con_next->clone();
   kills->set_req(0, copy);
   b->_nodes.insert(idx, kills);
-  _cfg._bbs.map(kills->_idx, b);
+  _cfg.map_node_to_block(kills, b);
   new_lrg(kills, max_lrg_id);
   return true;
 }
@@ -962,8 +962,7 @@
         // AggressiveCoalesce.  This effectively pre-virtual-splits
         // around uncommon uses of common defs.
         const RegMask &rm = n->in_RegMask(k);
-        if( !after_aggressive &&
-          _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
+        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) {
           // Since we are BEFORE aggressive coalesce, leave the register
           // mask untrimmed by the call.  This encourages more coalescing.
           // Later, AFTER aggressive, this live range will have to spill
@@ -1709,16 +1708,15 @@
       // set control to _root and place it into Start block
       // (where top() node is placed).
       base->init_req(0, _cfg._root);
-      Block *startb = _cfg._bbs[C->top()->_idx];
+      Block *startb = _cfg.get_block_for_node(C->top());
       startb->_nodes.insert(startb->find_node(C->top()), base );
-      _cfg._bbs.map( base->_idx, startb );
+      _cfg.map_node_to_block(base, startb);
       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
     }
     if (_lrg_map.live_range_id(base) == 0) {
       new_lrg(base, maxlrg++);
     }
-    assert(base->in(0) == _cfg._root &&
-           _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
+    assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
     derived_base_map[derived->_idx] = base;
     return base;
   }
@@ -1754,12 +1752,12 @@
   base->as_Phi()->set_type(t);
 
   // Search the current block for an existing base-Phi
-  Block *b = _cfg._bbs[derived->_idx];
+  Block *b = _cfg.get_block_for_node(derived);
   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
     Node *phi = b->_nodes[i];
     if( !phi->is_Phi() ) {      // Found end of Phis with no match?
       b->_nodes.insert( i, base ); // Must insert created Phi here as base
-      _cfg._bbs.map( base->_idx, b );
+      _cfg.map_node_to_block(base, b);
       new_lrg(base,maxlrg++);
       break;
     }
@@ -1815,8 +1813,8 @@
       if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
         Node *phi = n->in(1);
         if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
-          Block *phi_block = _cfg._bbs[phi->_idx];
-          if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
+          Block *phi_block = _cfg.get_block_for_node(phi);
+          if (_cfg.get_block_for_node(phi_block->pred(2)) == b) {
             const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
             Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
             insert_proj( phi_block, 1, spill, maxlrg++ );
@@ -1870,7 +1868,7 @@
             if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
                  !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
                  (_lrg_map.live_range_id(base) > 0) && // not a constant
-                 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
+                 _cfg.get_block_for_node(base) != b) { // base not def'd in blk)
               // Base pointer is not currently live.  Since I stretched
               // the base pointer to here and it crosses basic-block
               // boundaries, the global live info is now incorrect.
@@ -1993,8 +1991,8 @@
   tty->print("\n");
 }
 
-void PhaseChaitin::dump( const Block * b ) const {
-  b->dump_head( &_cfg._bbs );
+void PhaseChaitin::dump(const Block *b) const {
+  b->dump_head(&_cfg);
 
   // For all instructions
   for( uint j = 0; j < b->_nodes.size(); j++ )
@@ -2299,7 +2297,7 @@
       if (_lrg_map.find_const(n) == lidx) {
         if (!dump_once++) {
           tty->cr();
-          b->dump_head( &_cfg._bbs );
+          b->dump_head(&_cfg);
         }
         dump(n);
         continue;
@@ -2314,7 +2312,7 @@
           if (_lrg_map.find_const(m) == lidx) {
             if (!dump_once++) {
               tty->cr();
-              b->dump_head(&_cfg._bbs);
+              b->dump_head(&_cfg);
             }
             dump(n);
           }
--- a/hotspot/src/share/vm/opto/coalesce.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/coalesce.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -52,7 +52,7 @@
     // Print a nice block header
     tty->print("B%d: ",b->_pre_order);
     for( j=1; j<b->num_preds(); j++ )
-      tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
+      tty->print("B%d ", _phc._cfg.get_block_for_node(b->pred(j))->_pre_order);
     tty->print("-> ");
     for( j=0; j<b->_num_succs; j++ )
       tty->print("B%d ",b->_succs[j]->_pre_order);
@@ -208,7 +208,7 @@
     copy->set_req(idx,tmp);
     // Save source in temp early, before source is killed
     b->_nodes.insert(kill_src_idx,tmp);
-    _phc._cfg._bbs.map( tmp->_idx, b );
+    _phc._cfg.map_node_to_block(tmp, b);
     last_use_idx++;
   }
 
@@ -286,7 +286,7 @@
           Node *m = n->in(j);
           uint src_name = _phc._lrg_map.find(m);
           if (src_name != phi_name) {
-            Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
+            Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
             Node *copy;
             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
             // Rematerialize constants instead of copying them
@@ -305,7 +305,7 @@
             }
             // Insert the copy in the use-def chain
             n->set_req(j, copy);
-            _phc._cfg._bbs.map( copy->_idx, pred );
+            _phc._cfg.map_node_to_block(copy, pred);
             // Extend ("register allocate") the names array for the copy.
             _phc._lrg_map.extend(copy->_idx, phi_name);
           } // End of if Phi names do not match
@@ -343,13 +343,13 @@
             n->set_req(idx, copy);
             // Extend ("register allocate") the names array for the copy.
             _phc._lrg_map.extend(copy->_idx, name);
-            _phc._cfg._bbs.map( copy->_idx, b );
+            _phc._cfg.map_node_to_block(copy, b);
           }
 
         } // End of is two-adr
 
         // Insert a copy at a debug use for a lrg which has high frequency
-        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
+        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
           // Walk the debug inputs to the node and check for lrg freq
           JVMState* jvms = n->jvms();
           uint debug_start = jvms ? jvms->debug_start() : 999999;
@@ -391,7 +391,7 @@
               uint max_lrg_id = _phc._lrg_map.max_lrg_id();
               _phc.new_lrg(copy, max_lrg_id);
               _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
-              _phc._cfg._bbs.map(copy->_idx, b);
+              _phc._cfg.map_node_to_block(copy, b);
               //tty->print_cr("Split a debug use in Aggressive Coalesce");
             }  // End of if high frequency use/def
           }  // End of for all debug inputs
@@ -437,7 +437,10 @@
     Block *bs = b->_succs[i];
     // Find index of 'b' in 'bs' predecessors
     uint j=1;
-    while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
+    while (_phc._cfg.get_block_for_node(bs->pred(j)) != b) {
+      j++;
+    }
+
     // Visit all the Phis in successor block
     for( uint k = 1; k<bs->_nodes.size(); k++ ) {
       Node *n = bs->_nodes[k];
@@ -510,9 +513,9 @@
   if( bindex < b->_fhrp_index ) b->_fhrp_index--;
 
   // Stretched lr1; add it to liveness of intermediate blocks
-  Block *b2 = _phc._cfg._bbs[src_copy->_idx];
+  Block *b2 = _phc._cfg.get_block_for_node(src_copy);
   while( b != b2 ) {
-    b = _phc._cfg._bbs[b->pred(1)->_idx];
+    b = _phc._cfg.get_block_for_node(b->pred(1));
     _phc._live->live(b)->insert(lr1);
   }
 }
@@ -532,7 +535,7 @@
     bindex2--;                  // Chain backwards 1 instruction
     while( bindex2 == 0 ) {     // At block start, find prior block
       assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
-      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
+      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
       bindex2 = b2->end_idx()-1;
     }
     // Get prior instruction
@@ -676,8 +679,8 @@
 
   if (UseFPUForSpilling && rm.is_AllStack() ) {
     // Don't coalesce when frequency difference is large
-    Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
-    Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
+    Block *dst_b = _phc._cfg.get_block_for_node(dst_copy);
+    Block *src_def_b = _phc._cfg.get_block_for_node(src_def);
     if (src_def_b->_freq > 10*dst_b->_freq )
       return false;
   }
@@ -690,7 +693,7 @@
   // Another early bail-out test is when we are double-coalescing and the
   // 2 copies are separated by some control flow.
   if( dst_copy != src_copy ) {
-    Block *src_b = _phc._cfg._bbs[src_copy->_idx];
+    Block *src_b = _phc._cfg.get_block_for_node(src_copy);
     Block *b2 = b;
     while( b2 != src_b ) {
       if( b2->num_preds() > 2 ){// Found merge-point
@@ -701,7 +704,7 @@
         //record_bias( _phc._lrgs, lr1, lr2 );
         return false;           // To hard to find all interferences
       }
-      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
+      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
     }
   }
 
@@ -786,8 +789,9 @@
 // Conservative (but pessimistic) copy coalescing of a single block
 void PhaseConservativeCoalesce::coalesce( Block *b ) {
   // Bail out on infrequent blocks
-  if( b->is_uncommon(_phc._cfg._bbs) )
+  if (b->is_uncommon(&_phc._cfg)) {
     return;
+  }
   // Check this block for copies.
   for( uint i = 1; i<b->end_idx(); i++ ) {
     // Check for actual copies on inputs.  Coalesce a copy into its
--- a/hotspot/src/share/vm/opto/compile.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -2262,7 +2262,7 @@
       tty->print("%3.3x   ", pcs[n->_idx]);
     else
       tty->print("      ");
-    b->dump_head( &_cfg->_bbs );
+    b->dump_head(_cfg);
     if (b->is_connector()) {
       tty->print_cr("        # Empty connector block");
     } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
@@ -3525,7 +3525,7 @@
 }
 
 Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
-  Block* b = Compile::current()->cfg()->_bbs[n->_idx];
+  Block* b = Compile::current()->cfg()->get_block_for_node(n);
   Constant con(type, value, b->_freq);
   add(con);
   return con;
--- a/hotspot/src/share/vm/opto/domgraph.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/domgraph.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -105,8 +105,8 @@
 
     // Step 2:
     Node *whead = w->_block->head();
-    for( uint j=1; j < whead->req(); j++ ) {
-      Block *b = _bbs[whead->in(j)->_idx];
+    for (uint j = 1; j < whead->req(); j++) {
+      Block* b = get_block_for_node(whead->in(j));
       Tarjan *vx = &tarjan[b->_pre_order];
       Tarjan *u = vx->EVAL();
       if( u->_semi < w->_semi )
--- a/hotspot/src/share/vm/opto/gcm.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -66,7 +66,7 @@
 // are in b also.
 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
   // Set basic block of n, Add n to b,
-  _bbs.map(n->_idx, b);
+  map_node_to_block(n, b);
   b->add_inst(n);
 
   // After Matching, nearly any old Node may have projections trailing it.
@@ -75,11 +75,12 @@
   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
     Node*  use  = n->fast_out(i);
     if (use->is_Proj()) {
-      Block* buse = _bbs[use->_idx];
+      Block* buse = get_block_for_node(use);
       if (buse != b) {              // In wrong block?
-        if (buse != NULL)
+        if (buse != NULL) {
           buse->find_remove(use);   // Remove from wrong block
-        _bbs.map(use->_idx, b);     // Re-insert in this block
+        }
+        map_node_to_block(use, b);
         b->add_inst(use);
       }
     }
@@ -97,7 +98,7 @@
   if (p != NULL && p != n) {    // Control from a block projection?
     assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
     // Find trailing Region
-    Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
+    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
     uint j = 0;
     if (pb->_num_succs != 1) {  // More then 1 successor?
       // Search for successor
@@ -127,14 +128,15 @@
   while ( spstack.is_nonempty() ) {
     Node *n = spstack.pop();
     if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
-      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
+      if( n->pinned() && !has_block(n)) {  // Pinned?  Nail it down!
         assert( n->in(0), "pinned Node must have Control" );
         // Before setting block replace block_proj control edge
         replace_block_proj_ctrl(n);
         Node *input = n->in(0);
-        while( !input->is_block_start() )
+        while (!input->is_block_start()) {
           input = input->in(0);
-        Block *b = _bbs[input->_idx];  // Basic block of controlling input
+        }
+        Block *b = get_block_for_node(input); // Basic block of controlling input
         schedule_node_into_block(n, b);
       }
       for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
@@ -149,7 +151,7 @@
 // Assert that new input b2 is dominated by all previous inputs.
 // Check this by by seeing that it is dominated by b1, the deepest
 // input observed until b2.
-static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
+static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
   if (b1 == NULL)  return;
   assert(b1->_dom_depth < b2->_dom_depth, "sanity");
   Block* tmp = b2;
@@ -162,7 +164,7 @@
     for (uint j=0; j<n->len(); j++) { // For all inputs
       Node* inn = n->in(j); // Get input
       if (inn == NULL)  continue;  // Ignore NULL, missing inputs
-      Block* inb = bbs[inn->_idx];
+      Block* inb = cfg->get_block_for_node(inn);
       tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                  inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
       inn->dump();
@@ -174,20 +176,20 @@
 }
 #endif
 
-static Block* find_deepest_input(Node* n, Block_Array &bbs) {
+static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
   // Find the last input dominated by all other inputs.
   Block* deepb           = NULL;        // Deepest block so far
   int    deepb_dom_depth = 0;
   for (uint k = 0; k < n->len(); k++) { // For all inputs
     Node* inn = n->in(k);               // Get input
     if (inn == NULL)  continue;         // Ignore NULL, missing inputs
-    Block* inb = bbs[inn->_idx];
+    Block* inb = cfg->get_block_for_node(inn);
     assert(inb != NULL, "must already have scheduled this input");
     if (deepb_dom_depth < (int) inb->_dom_depth) {
       // The new inb must be dominated by the previous deepb.
       // The various inputs must be linearly ordered in the dom
       // tree, or else there will not be a unique deepest block.
-      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
+      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
       deepb = inb;                      // Save deepest block
       deepb_dom_depth = deepb->_dom_depth;
     }
@@ -243,7 +245,7 @@
         ++i;
         if (in == NULL) continue;    // Ignore NULL, missing inputs
         int is_visited = visited.test_set(in->_idx);
-        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
+        if (!has_block(in)) { // Missing block selection?
           if (is_visited) {
             // assert( !visited.test(in->_idx), "did not schedule early" );
             return false;
@@ -265,9 +267,9 @@
         // any projections which depend on them.
         if (!n->pinned()) {
           // Set earliest legal block.
-          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
+          map_node_to_block(n, find_deepest_input(n, this));
         } else {
-          assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
+          assert(get_block_for_node(n) == get_block_for_node(n->in(0)), "Pinned Node should be at the same block as its control edge");
         }
 
         if (nstack.is_empty()) {
@@ -313,8 +315,8 @@
 // The definition must dominate the use, so move the LCA upward in the
 // dominator tree to dominate the use.  If the use is a phi, adjust
 // the LCA only with the phi input paths which actually use this def.
-static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
-  Block* buse = bbs[use->_idx];
+static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
+  Block* buse = cfg->get_block_for_node(use);
   if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
   if (!use->is_Phi())  return buse->dom_lca(LCA);
   uint pmax = use->req();       // Number of Phi inputs
@@ -329,7 +331,7 @@
   // more than once.
   for (uint j=1; j<pmax; j++) { // For all inputs
     if (use->in(j) == def) {    // Found matching input?
-      Block* pred = bbs[buse->pred(j)->_idx];
+      Block* pred = cfg->get_block_for_node(buse->pred(j));
       LCA = pred->dom_lca(LCA);
     }
   }
@@ -342,8 +344,7 @@
 // which are marked with the given index.  Return the LCA (in the dom tree)
 // of all marked blocks.  If there are none marked, return the original
 // LCA.
-static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
-                                    Block* early, Block_Array &bbs) {
+static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
   Block_List worklist;
   worklist.push(LCA);
   while (worklist.size() > 0) {
@@ -366,7 +367,7 @@
     } else {
       // Keep searching through this block's predecessors.
       for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
-        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
+        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
         worklist.push(mid_parent);
       }
     }
@@ -384,7 +385,7 @@
 // be earlier (at a shallower dom_depth) than the true schedule_early
 // point of the node. We compute this earlier block as a more permissive
 // site for anti-dependency insertion, but only if subsume_loads is enabled.
-static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
+static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
   Node* base;
   Node* index;
   Node* store = load->in(MemNode::Memory);
@@ -412,12 +413,12 @@
     Block* deepb           = NULL;        // Deepest block so far
     int    deepb_dom_depth = 0;
     for (int i = 0; i < mem_inputs_length; i++) {
-      Block* inb = bbs[mem_inputs[i]->_idx];
+      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
       if (deepb_dom_depth < (int) inb->_dom_depth) {
         // The new inb must be dominated by the previous deepb.
         // The various inputs must be linearly ordered in the dom
         // tree, or else there will not be a unique deepest block.
-        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
+        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
         deepb = inb;                      // Save deepest block
         deepb_dom_depth = deepb->_dom_depth;
       }
@@ -488,14 +489,14 @@
   // and other inputs are first available.  (Computed by schedule_early.)
   // For normal loads, 'early' is the shallowest place (dom graph wise)
   // to look for anti-deps between this load and any store.
-  Block* early = _bbs[load_index];
+  Block* early = get_block_for_node(load);
 
   // If we are subsuming loads, compute an "early" block that only considers
   // memory or address inputs. This block may be different than the
   // schedule_early block in that it could be at an even shallower depth in the
   // dominator tree, and allow for a broader discovery of anti-dependences.
   if (C->subsume_loads()) {
-    early = memory_early_block(load, early, _bbs);
+    early = memory_early_block(load, early, this);
   }
 
   ResourceArea *area = Thread::current()->resource_area();
@@ -619,7 +620,7 @@
     // or else observe that 'store' is all the way up in the
     // earliest legal block for 'load'.  In the latter case,
     // immediately insert an anti-dependence edge.
-    Block* store_block = _bbs[store->_idx];
+    Block* store_block = get_block_for_node(store);
     assert(store_block != NULL, "unused killing projections skipped above");
 
     if (store->is_Phi()) {
@@ -637,7 +638,7 @@
       for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
         if (store->in(j) == mem) {   // Found matching input?
           DEBUG_ONLY(found_match = true);
-          Block* pred_block = _bbs[store_block->pred(j)->_idx];
+          Block* pred_block = get_block_for_node(store_block->pred(j));
           if (pred_block != early) {
             // If any predecessor of the Phi matches the load's "early block",
             // we do not need a precedence edge between the Phi and 'load'
@@ -711,7 +712,7 @@
   // preventing the load from sinking past any block containing
   // a store that may invalidate the memory state required by 'load'.
   if (must_raise_LCA)
-    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
+    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
   if (LCA == early)  return LCA;
 
   // Insert anti-dependence edges from 'load' to each store
@@ -720,7 +721,7 @@
   if (LCA->raise_LCA_mark() == load_index) {
     while (non_early_stores.size() > 0) {
       Node* store = non_early_stores.pop();
-      Block* store_block = _bbs[store->_idx];
+      Block* store_block = get_block_for_node(store);
       if (store_block == LCA) {
         // add anti_dependence from store to load in its own block
         assert(store != load->in(0), "dependence cycle found");
@@ -754,7 +755,7 @@
 
 public:
   // Constructor for the iterator
-  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);
+  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
 
   // Postincrement operator to iterate over the nodes
   Node *next();
@@ -762,12 +763,12 @@
 private:
   VectorSet   &_visited;
   Node_List   &_stack;
-  Block_Array &_bbs;
+  PhaseCFG &_cfg;
 };
 
 // Constructor for the Node_Backward_Iterator
-Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
-  : _visited(visited), _stack(stack), _bbs(bbs) {
+Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
+  : _visited(visited), _stack(stack), _cfg(cfg) {
   // The stack should contain exactly the root
   stack.clear();
   stack.push(root);
@@ -797,8 +798,8 @@
     _visited.set(self->_idx);
 
     // Now schedule all uses as late as possible.
-    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
-    uint src_rpo = _bbs[src]->_rpo;
+    const Node* src = self->is_Proj() ? self->in(0) : self;
+    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
 
     // Schedule all nodes in a post-order visit
     Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
@@ -814,7 +815,7 @@
 
       // do not traverse backward control edges
       Node *use = n->is_Proj() ? n->in(0) : n;
-      uint use_rpo = _bbs[use->_idx]->_rpo;
+      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
 
       if ( use_rpo < src_rpo )
         continue;
@@ -852,7 +853,7 @@
     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
 #endif
 
-  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
+  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
   Node *n;
 
   // Walk over all the nodes from last to first
@@ -883,7 +884,7 @@
 
   uint nlen = n->len();
   uint use_latency = _node_latency->at_grow(n->_idx);
-  uint use_pre_order = _bbs[n->_idx]->_pre_order;
+  uint use_pre_order = get_block_for_node(n)->_pre_order;
 
   for ( uint j=0; j<nlen; j++ ) {
     Node *def = n->in(j);
@@ -903,7 +904,7 @@
 #endif
 
     // If the defining block is not known, assume it is ok
-    Block *def_block = _bbs[def->_idx];
+    Block *def_block = get_block_for_node(def);
     uint def_pre_order = def_block ? def_block->_pre_order : 0;
 
     if ( (use_pre_order <  def_pre_order) ||
@@ -931,10 +932,11 @@
 // Compute the latency of a specific use
 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
   // If self-reference, return no latency
-  if (use == n || use->is_Root())
+  if (use == n || use->is_Root()) {
     return 0;
+  }
 
-  uint def_pre_order = _bbs[def->_idx]->_pre_order;
+  uint def_pre_order = get_block_for_node(def)->_pre_order;
   uint latency = 0;
 
   // If the use is not a projection, then it is simple...
@@ -946,7 +948,7 @@
     }
 #endif
 
-    uint use_pre_order = _bbs[use->_idx]->_pre_order;
+    uint use_pre_order = get_block_for_node(use)->_pre_order;
 
     if (use_pre_order < def_pre_order)
       return 0;
@@ -1018,7 +1020,7 @@
   uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
   uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   bool in_latency    = (target <= start_latency);
-  const Block* root_block = _bbs[_root->_idx];
+  const Block* root_block = get_block_for_node(_root);
 
   // Turn off latency scheduling if scheduling is just plain off
   if (!C->do_scheduling())
@@ -1126,12 +1128,12 @@
     tty->print("\n#---- schedule_late ----\n");
 #endif
 
-  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
+  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
   Node *self;
 
   // Walk over all the nodes from last to first
   while (self = iter.next()) {
-    Block* early = _bbs[self->_idx];   // Earliest legal placement
+    Block* early = get_block_for_node(self); // Earliest legal placement
 
     if (self->is_top()) {
       // Top node goes in bb #2 with other constants.
@@ -1179,7 +1181,7 @@
       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
         // For all uses, find LCA
         Node* use = self->fast_out(i);
-        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
+        LCA = raise_LCA_above_use(LCA, use, self, this);
       }
     }  // (Hide defs of imax, i from rest of block.)
 
@@ -1187,7 +1189,7 @@
     // requirement for correctness but it reduces useless
     // interference between temps and other nodes.
     if (mach != NULL && mach->is_MachTemp()) {
-      _bbs.map(self->_idx, LCA);
+      map_node_to_block(self, LCA);
       LCA->add_inst(self);
       continue;
     }
@@ -1262,10 +1264,10 @@
   }
 #endif
 
-  // Initialize the bbs.map for things on the proj_list
-  uint i;
-  for( i=0; i < proj_list.size(); i++ )
-    _bbs.map(proj_list[i]->_idx, NULL);
+  // Initialize the node to block mapping for things on the proj_list
+  for (uint i = 0; i < proj_list.size(); i++) {
+    unmap_node_from_block(proj_list[i]);
+  }
 
   // Set the basic block for Nodes pinned into blocks
   Arena *a = Thread::current()->resource_area();
@@ -1333,7 +1335,7 @@
     for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
       Node *proj = matcher._null_check_tests[i  ];
       Node *val  = matcher._null_check_tests[i+1];
-      _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
+      get_block_for_node(proj)->implicit_null_check(this, proj, val, allowed_reasons);
       // The implicit_null_check will only perform the transformation
       // if the null branch is truly uncommon, *and* it leads to an
       // uncommon trap.  Combined with the too_many_traps guards
@@ -1353,7 +1355,7 @@
   uint max_idx = C->unique();
   GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
   visited.Clear();
-  for (i = 0; i < _num_blocks; i++) {
+  for (uint i = 0; i < _num_blocks; i++) {
     if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
         C->record_method_not_compilable("local schedule failed");
@@ -1364,8 +1366,9 @@
 
   // If we inserted any instructions between a Call and his CatchNode,
   // clone the instructions on all paths below the Catch.
-  for( i=0; i < _num_blocks; i++ )
-    _blocks[i]->call_catch_cleanup(_bbs, C);
+  for (uint i = 0; i < _num_blocks; i++) {
+    _blocks[i]->call_catch_cleanup(this, C);
+  }
 
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
@@ -1392,7 +1395,7 @@
     Block_List worklist;
     Block* root_blk = _blocks[0];
     for (uint i = 1; i < root_blk->num_preds(); i++) {
-      Block *pb = _bbs[root_blk->pred(i)->_idx];
+      Block *pb = get_block_for_node(root_blk->pred(i));
       if (pb->has_uncommon_code()) {
         worklist.push(pb);
       }
@@ -1401,7 +1404,7 @@
       Block* uct = worklist.pop();
       if (uct == _broot) continue;
       for (uint i = 1; i < uct->num_preds(); i++) {
-        Block *pb = _bbs[uct->pred(i)->_idx];
+        Block *pb = get_block_for_node(uct->pred(i));
         if (pb->_num_succs == 1) {
           worklist.push(pb);
         } else if (pb->num_fall_throughs() == 2) {
@@ -1430,7 +1433,7 @@
     Block_List worklist;
     Block* root_blk = _blocks[0];
     for (uint i = 1; i < root_blk->num_preds(); i++) {
-      Block *pb = _bbs[root_blk->pred(i)->_idx];
+      Block *pb = get_block_for_node(root_blk->pred(i));
       if (pb->has_uncommon_code()) {
         worklist.push(pb);
       }
@@ -1439,7 +1442,7 @@
       Block* uct = worklist.pop();
       uct->_freq = PROB_MIN;
       for (uint i = 1; i < uct->num_preds(); i++) {
-        Block *pb = _bbs[uct->pred(i)->_idx];
+        Block *pb = get_block_for_node(uct->pred(i));
         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
           worklist.push(pb);
         }
@@ -1499,7 +1502,7 @@
       Block* loop_head = b;
       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
-      Block* tail = _bbs[tail_n->_idx];
+      Block* tail = get_block_for_node(tail_n);
 
       // Defensively filter out Loop nodes for non-single-entry loops.
       // For all reasonable loops, the head occurs before the tail in RPO.
@@ -1514,13 +1517,13 @@
         loop_head->_loop = nloop;
         // Add to nloop so push_pred() will skip over inner loops
         nloop->add_member(loop_head);
-        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
+        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
 
         while (worklist.size() > 0) {
           Block* member = worklist.pop();
           if (member != loop_head) {
             for (uint j = 1; j < member->num_preds(); j++) {
-              nloop->push_pred(member, j, worklist, _bbs);
+              nloop->push_pred(member, j, worklist, this);
             }
           }
         }
@@ -1557,9 +1560,9 @@
 }
 
 //------------------------------push_pred--------------------------------------
-void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
+void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
   Node* pred_n = blk->pred(i);
-  Block* pred = node_to_blk[pred_n->_idx];
+  Block* pred = cfg->get_block_for_node(pred_n);
   CFGLoop *pred_loop = pred->_loop;
   if (pred_loop == NULL) {
     // Filter out blocks for non-single-entry loops.
@@ -1580,7 +1583,7 @@
       Block* pred_head = pred_loop->head();
       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
       assert(pred_head != head(), "loop head in only one loop");
-      push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
+      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
     } else {
       assert(pred_loop->_parent == this && _parent == NULL, "just checking");
     }
--- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -413,9 +413,9 @@
     print_prop("debug_idx", node->_debug_idx);
 #endif
 
-    if(C->cfg() != NULL) {
-      Block *block = C->cfg()->_bbs[node->_idx];
-      if(block == NULL) {
+    if (C->cfg() != NULL) {
+      Block* block = C->cfg()->get_block_for_node(node);
+      if (block == NULL) {
         print_prop("block", C->cfg()->_blocks[0]->_pre_order);
       } else {
         print_prop("block", block->_pre_order);
--- a/hotspot/src/share/vm/opto/ifg.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/ifg.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -565,7 +565,7 @@
               lrgs(r)._def = 0;
             }
             n->disconnect_inputs(NULL, C);
-            _cfg._bbs.map(n->_idx,NULL);
+            _cfg.unmap_node_from_block(n);
             n->replace_by(C->top());
             // Since yanking a Node from block, high pressure moves up one
             hrp_index[0]--;
@@ -607,7 +607,7 @@
           if( n->is_SpillCopy()
               && lrgs(r).is_singledef()        // MultiDef live range can still split
               && n->outcnt() == 1              // and use must be in this block
-              && _cfg._bbs[n->unique_out()->_idx] == b ) {
+              && _cfg.get_block_for_node(n->unique_out()) == b ) {
             // All single-use MachSpillCopy(s) that immediately precede their
             // use must color early.  If a longer live range steals their
             // color, the spill copy will split and may push another spill copy
--- a/hotspot/src/share/vm/opto/lcm.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -237,7 +237,7 @@
     }
 
     // Check ctrl input to see if the null-check dominates the memory op
-    Block *cb = cfg->_bbs[mach->_idx];
+    Block *cb = cfg->get_block_for_node(mach);
     cb = cb->_idom;             // Always hoist at least 1 block
     if( !was_store ) {          // Stores can be hoisted only one block
       while( cb->_dom_depth > (_dom_depth + 1))
@@ -262,7 +262,7 @@
         if( is_decoden ) continue;
       }
       // Block of memory-op input
-      Block *inb = cfg->_bbs[mach->in(j)->_idx];
+      Block *inb = cfg->get_block_for_node(mach->in(j));
       Block *b = this;          // Start from nul check
       while( b != inb && b->_dom_depth > inb->_dom_depth )
         b = b->_idom;           // search upwards for input
@@ -272,7 +272,7 @@
     }
     if( j > 0 )
       continue;
-    Block *mb = cfg->_bbs[mach->_idx];
+    Block *mb = cfg->get_block_for_node(mach);
     // Hoisting stores requires more checks for the anti-dependence case.
     // Give up hoisting if we have to move the store past any load.
     if( was_store ) {
@@ -291,7 +291,7 @@
           break;                // Found anti-dependent load
         // Make sure control does not do a merge (would have to check allpaths)
         if( b->num_preds() != 2 ) break;
-        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
+        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
       }
       if( b != this ) continue;
     }
@@ -303,15 +303,15 @@
 
     // Found a candidate!  Pick one with least dom depth - the highest
     // in the dom tree should be closest to the null check.
-    if( !best ||
-        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
+    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
       best = mach;
       bidx = vidx;
-
     }
   }
   // No candidate!
-  if( !best ) return;
+  if (best == NULL) {
+    return;
+  }
 
   // ---- Found an implicit null check
   extern int implicit_null_checks;
@@ -319,29 +319,29 @@
 
   if( is_decoden ) {
     // Check if we need to hoist decodeHeapOop_not_null first.
-    Block *valb = cfg->_bbs[val->_idx];
+    Block *valb = cfg->get_block_for_node(val);
     if( this != valb && this->_dom_depth < valb->_dom_depth ) {
       // Hoist it up to the end of the test block.
       valb->find_remove(val);
       this->add_inst(val);
-      cfg->_bbs.map(val->_idx,this);
+      cfg->map_node_to_block(val, this);
       // DecodeN on x86 may kill flags. Check for flag-killing projections
       // that also need to be hoisted.
       for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
         Node* n = val->fast_out(j);
         if( n->is_MachProj() ) {
-          cfg->_bbs[n->_idx]->find_remove(n);
+          cfg->get_block_for_node(n)->find_remove(n);
           this->add_inst(n);
-          cfg->_bbs.map(n->_idx,this);
+          cfg->map_node_to_block(n, this);
         }
       }
     }
   }
   // Hoist the memory candidate up to the end of the test block.
-  Block *old_block = cfg->_bbs[best->_idx];
+  Block *old_block = cfg->get_block_for_node(best);
   old_block->find_remove(best);
   add_inst(best);
-  cfg->_bbs.map(best->_idx,this);
+  cfg->map_node_to_block(best, this);
 
   // Move the control dependence
   if (best->in(0) && best->in(0) == old_block->_nodes[0])
@@ -352,9 +352,9 @@
   for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
     Node* n = best->fast_out(j);
     if( n->is_MachProj() ) {
-      cfg->_bbs[n->_idx]->find_remove(n);
+      cfg->get_block_for_node(n)->find_remove(n);
       add_inst(n);
-      cfg->_bbs.map(n->_idx,this);
+      cfg->map_node_to_block(n, this);
     }
   }
 
@@ -385,7 +385,7 @@
   Node *old_tst = proj->in(0);
   MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
   _nodes.map(end_idx(),nul_chk);
-  cfg->_bbs.map(nul_chk->_idx,this);
+  cfg->map_node_to_block(nul_chk, this);
   // Redirect users of old_test to nul_chk
   for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
     old_tst->last_out(i2)->set_req(0, nul_chk);
@@ -468,7 +468,7 @@
         Node* use = n->fast_out(j);
 
         // The use is a conditional branch, make them adjacent
-        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
+        if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
           found_machif = true;
           break;
         }
@@ -529,13 +529,14 @@
 
 
 //------------------------------set_next_call----------------------------------
-void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
+void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
   if( next_call.test_set(n->_idx) ) return;
   for( uint i=0; i<n->len(); i++ ) {
     Node *m = n->in(i);
     if( !m ) continue;  // must see all nodes in block that precede call
-    if( bbs[m->_idx] == this )
-      set_next_call( m, next_call, bbs );
+    if (cfg->get_block_for_node(m) == this) {
+      set_next_call(m, next_call, cfg);
+    }
   }
 }
 
@@ -545,12 +546,12 @@
 // next subroutine call get priority - basically it moves things NOT needed
 // for the next call till after the call.  This prevents me from trying to
 // carry lots of stuff live across a call.
-void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
+void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
   // Find the next control-defining Node in this block
   Node* call = NULL;
   for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
     Node* m = this_call->fast_out(i);
-    if( bbs[m->_idx] == this && // Local-block user
+    if (cfg->get_block_for_node(m) == this && // Local-block user
         m != this_call &&       // Not self-start node
         m->is_MachCall() )
       call = m;
@@ -558,7 +559,7 @@
   }
   if (call == NULL)  return;    // No next call (e.g., block end is near)
   // Set next-call for all inputs to this call
-  set_next_call(call, next_call, bbs);
+  set_next_call(call, next_call, cfg);
 }
 
 //------------------------------add_call_kills-------------------------------------
@@ -578,7 +579,7 @@
 
 
 //------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
   RegMask regs;
 
   // Schedule all the users of the call right now.  All the users are
@@ -597,12 +598,14 @@
     // Check for scheduling the next control-definer
     if( n->bottom_type() == Type::CONTROL )
       // Warm up next pile of heuristic bits
-      needed_for_next_call(n, next_call, bbs);
+      needed_for_next_call(n, next_call, cfg);
 
     // Children of projections are now all ready
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j); // Get user
-      if( bbs[m->_idx] != this ) continue;
+      if (cfg->get_block_for_node(m) != this) {
+        continue;
+      }
       if( m->is_Phi() ) continue;
       int m_cnt = ready_cnt.at(m->_idx)-1;
       ready_cnt.at_put(m->_idx, m_cnt);
@@ -620,7 +623,7 @@
   uint r_cnt = mcall->tf()->range()->cnt();
   int op = mcall->ideal_Opcode();
   MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
-  bbs.map(proj->_idx,this);
+  cfg->map_node_to_block(proj, this);
   _nodes.insert(node_cnt++, proj);
 
   // Select the right register save policy.
@@ -708,7 +711,7 @@
       uint local = 0;
       for( uint j=0; j<cnt; j++ ) {
         Node *m = n->in(j);
-        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
+        if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
           local++;              // One more block-local input
       }
       ready_cnt.at_put(n->_idx, local); // Count em up
@@ -720,7 +723,7 @@
           for (uint prec = n->req(); prec < n->len(); prec++) {
             Node* oop_store = n->in(prec);
             if (oop_store != NULL) {
-              assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+              assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
             }
           }
         }
@@ -753,7 +756,7 @@
     Node *n = _nodes[i3];       // Get pre-scheduled
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j);
-      if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
+      if (cfg->get_block_for_node(m) == this) { // Local-block user
         int m_cnt = ready_cnt.at(m->_idx)-1;
         ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
       }
@@ -786,7 +789,7 @@
   }
 
   // Warm up the 'next_call' heuristic bits
-  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);
+  needed_for_next_call(_nodes[0], next_call, cfg);
 
 #ifndef PRODUCT
     if (cfg->trace_opto_pipelining()) {
@@ -837,7 +840,7 @@
 #endif
     if( n->is_MachCall() ) {
       MachCallNode *mcall = n->as_MachCall();
-      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
+      phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
       continue;
     }
 
@@ -847,7 +850,7 @@
       regs.OR(n->out_RegMask());
 
       MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
-      cfg->_bbs.map(proj->_idx,this);
+      cfg->map_node_to_block(proj, this);
       _nodes.insert(phi_cnt++, proj);
 
       add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
@@ -856,7 +859,9 @@
     // Children are now all ready
     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
       Node* m = n->fast_out(i5); // Get user
-      if( cfg->_bbs[m->_idx] != this ) continue;
+      if (cfg->get_block_for_node(m) != this) {
+        continue;
+      }
       if( m->is_Phi() ) continue;
       if (m->_idx >= max_idx) { // new node, skip it
         assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
@@ -914,7 +919,7 @@
 }
 
 //------------------------------catch_cleanup_find_cloned_def------------------
-static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
+static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
   assert( use_blk != def_blk, "Inter-block cleanup only");
 
   // The use is some block below the Catch.  Find and return the clone of the def
@@ -940,7 +945,8 @@
     // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
     Node_Array inputs = new Node_List(Thread::current()->resource_area());
     for(uint k = 1; k < use_blk->num_preds(); k++) {
-      inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
+      Block* block = cfg->get_block_for_node(use_blk->pred(k));
+      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
     }
 
     // Check to see if the use_blk already has an identical phi inserted.
@@ -962,7 +968,7 @@
     if (fixup == NULL) {
       Node *new_phi = PhiNode::make(use_blk->head(), def);
       use_blk->_nodes.insert(1, new_phi);
-      bbs.map(new_phi->_idx, use_blk);
+      cfg->map_node_to_block(new_phi, use_blk);
       for (uint k = 1; k < use_blk->num_preds(); k++) {
         new_phi->set_req(k, inputs[k]);
       }
@@ -1002,17 +1008,17 @@
 //------------------------------catch_cleanup_inter_block---------------------
 // Fix all input edges in use that reference "def".  The use is in a different
 // block than the def.
-static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
+static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
   if( !use_blk ) return;        // Can happen if the use is a precedence edge
 
-  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
+  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
   catch_cleanup_fix_all_inputs(use, def, new_def);
 }
 
 //------------------------------call_catch_cleanup-----------------------------
 // If we inserted any instructions between a Call and his CatchNode,
 // clone the instructions on all paths below the Catch.
-void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
+void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
 
   // End of region to clone
   uint end = end_idx();
@@ -1037,7 +1043,7 @@
       // since clones dominate on each path.
       Node *clone = _nodes[j-1]->clone();
       sb->_nodes.insert( 1, clone );
-      bbs.map(clone->_idx,sb);
+      cfg->map_node_to_block(clone, sb);
     }
   }
 
@@ -1054,18 +1060,19 @@
     uint max = out->size();
     for (uint j = 0; j < max; j++) {// For all users
       Node *use = out->pop();
-      Block *buse = bbs[use->_idx];
+      Block *buse = cfg->get_block_for_node(use);
       if( use->is_Phi() ) {
         for( uint k = 1; k < use->req(); k++ )
           if( use->in(k) == n ) {
-            Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
+            Block* block = cfg->get_block_for_node(buse->pred(k));
+            Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
             use->set_req(k, fixup);
           }
       } else {
         if (this == buse) {
           catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
         } else {
-          catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
+          catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
         }
       }
     } // End for all users
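
Taken together, the hunks above replace every direct read or write of the old node-to-block array with three PhaseCFG accessors: get_block_for_node(), map_node_to_block() and unmap_node_from_block(). The changeset does not show the accessor definitions themselves, so what follows is only a minimal, self-contained sketch of the encapsulation they provide; PhaseCFGModel, the std::vector storage and the simplified Node/Block structs are illustrative stand-ins, not HotSpot's actual declarations.

#include <cstddef>
#include <vector>

// Simplified stand-ins for the C2 types involved; the real Node, Block and
// Block_Array are declared in opto/node.hpp and opto/block.hpp.
struct Node  { unsigned _idx; };
struct Block { unsigned _pre_order; };

// Minimal model of the encapsulation introduced here: callers query and
// update the node->block mapping through named accessors instead of
// indexing the underlying array directly.
class PhaseCFGModel {
 private:
  std::vector<Block*> _node_to_block_mapping;   // indexed by Node::_idx

 public:
  // Look up the basic block a node is currently assigned to (NULL if none).
  Block* get_block_for_node(const Node* n) const {
    return (n->_idx < _node_to_block_mapping.size())
               ? _node_to_block_mapping[n->_idx]
               : NULL;
  }

  // Record that a node now lives in the given block.
  void map_node_to_block(const Node* n, Block* b) {
    if (n->_idx >= _node_to_block_mapping.size()) {
      _node_to_block_mapping.resize(n->_idx + 1, NULL);
    }
    _node_to_block_mapping[n->_idx] = b;
  }

  // Remove a node from the mapping, e.g. after it is yanked from a block.
  void unmap_node_from_block(const Node* n) {
    map_node_to_block(n, NULL);
  }
};
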
--- a/hotspot/src/share/vm/opto/live.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/live.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -101,7 +101,7 @@
       for( uint k=1; k<cnt; k++ ) {
         Node *nk = n->in(k);
         uint nkidx = nk->_idx;
-        if( _cfg._bbs[nkidx] != b ) {
+        if (_cfg.get_block_for_node(nk) != b) {
           uint u = _names[nkidx];
           use->insert( u );
           DEBUG_ONLY(def_outside->insert( u );)
@@ -121,7 +121,7 @@
 
     // Push these live-in things to predecessors
     for( uint l=1; l<b->num_preds(); l++ ) {
-      Block *p = _cfg._bbs[b->pred(l)->_idx];
+      Block *p = _cfg.get_block_for_node(b->pred(l));
       add_liveout( p, use, first_pass );
 
       // PhiNode uses go in the live-out set of prior blocks.
@@ -142,8 +142,10 @@
       assert( delta->count(), "missing delta set" );
 
       // Add new-live-in to predecessors live-out sets
-      for( uint l=1; l<b->num_preds(); l++ )
-        add_liveout( _cfg._bbs[b->pred(l)->_idx], delta, first_pass );
+      for (uint l = 1; l < b->num_preds(); l++) {
+        Block* block = _cfg.get_block_for_node(b->pred(l));
+        add_liveout(block, delta, first_pass);
+      }
 
       freeset(b);
     } // End of while-worklist-not-empty
--- a/hotspot/src/share/vm/opto/node.hpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp	Wed Aug 07 17:56:19 2013 +0200
@@ -42,7 +42,6 @@
 class AllocateArrayNode;
 class AllocateNode;
 class Block;
-class Block_Array;
 class BoolNode;
 class BoxLockNode;
 class CMoveNode;
--- a/hotspot/src/share/vm/opto/output.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -68,7 +68,6 @@
     return;
   }
   // Make sure I can find the Start Node
-  Block_Array& bbs = _cfg->_bbs;
   Block *entry = _cfg->_blocks[1];
   Block *broot = _cfg->_broot;
 
@@ -77,8 +76,8 @@
   // Replace StartNode with prolog
   MachPrologNode *prolog = new (this) MachPrologNode();
   entry->_nodes.map( 0, prolog );
-  bbs.map( prolog->_idx, entry );
-  bbs.map( start->_idx, NULL ); // start is no longer in any block
+  _cfg->map_node_to_block(prolog, entry);
+  _cfg->unmap_node_from_block(start); // start is no longer in any block
 
   // Virtual methods need an unverified entry point
 
@@ -117,8 +116,7 @@
       if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
         MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
         b->add_inst( epilog );
-        bbs.map(epilog->_idx, b);
-        //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
+        _cfg->map_node_to_block(epilog, b);
       }
     }
   }
@@ -252,7 +250,7 @@
         if (insert) {
           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
           b->_nodes.insert( j, zap );
-          _cfg->_bbs.map( zap->_idx, b );
+          _cfg->map_node_to_block(zap, b);
           ++j;
         }
       }
@@ -1234,7 +1232,7 @@
 #ifdef ASSERT
     if (!b->is_connector()) {
       stringStream st;
-      b->dump_head(&_cfg->_bbs, &st);
+      b->dump_head(_cfg, &st);
       MacroAssembler(cb).block_comment(st.as_string());
     }
     jmp_target[i] = 0;
@@ -1310,7 +1308,7 @@
           MachNode *nop = new (this) MachNopNode(nops_cnt);
           b->_nodes.insert(j++, nop);
           last_inst++;
-          _cfg->_bbs.map( nop->_idx, b );
+          _cfg->map_node_to_block(nop, b);
           nop->emit(*cb, _regalloc);
           cb->flush_bundle(true);
           current_offset = cb->insts_size();
@@ -1395,7 +1393,7 @@
               if (needs_padding && replacement->avoid_back_to_back()) {
                 MachNode *nop = new (this) MachNopNode();
                 b->_nodes.insert(j++, nop);
-                _cfg->_bbs.map(nop->_idx, b);
+                _cfg->map_node_to_block(nop, b);
                 last_inst++;
                 nop->emit(*cb, _regalloc);
                 cb->flush_bundle(true);
@@ -1549,7 +1547,7 @@
       if( padding > 0 ) {
         MachNode *nop = new (this) MachNopNode(padding / nop_size);
         b->_nodes.insert( b->_nodes.size(), nop );
-        _cfg->_bbs.map( nop->_idx, b );
+        _cfg->map_node_to_block(nop, b);
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
       }
@@ -1737,7 +1735,6 @@
 Scheduling::Scheduling(Arena *arena, Compile &compile)
   : _arena(arena),
     _cfg(compile.cfg()),
-    _bbs(compile.cfg()->_bbs),
     _regalloc(compile.regalloc()),
     _reg_node(arena),
     _bundle_instr_count(0),
@@ -2085,8 +2082,9 @@
     if( def->is_Proj() )        // If this is a machine projection, then
       def = def->in(0);         // propagate usage thru to the base instruction
 
-    if( _bbs[def->_idx] != bb ) // Ignore if not block-local
+    if (_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
       continue;
+    }
 
     // Compute the latency
     uint l = _bundle_cycle_number + n->latency(i);
@@ -2358,9 +2356,10 @@
       Node *inp = n->in(k);
       if (!inp) continue;
       assert(inp != n, "no cycles allowed" );
-      if( _bbs[inp->_idx] == bb ) { // Block-local use?
-        if( inp->is_Proj() )    // Skip through Proj's
+      if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
+        if (inp->is_Proj()) { // Skip through Proj's
           inp = inp->in(0);
+        }
         ++_uses[inp->_idx];     // Count 1 block-local use
       }
     }
@@ -2643,7 +2642,7 @@
     return;
 
   Node *pinch = _reg_node[def_reg]; // Get pinch point
-  if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
+  if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
       is_def ) {    // Check for a true def (not a kill)
     _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
     return;
@@ -2669,7 +2668,7 @@
       _cfg->C->record_method_not_compilable("too many D-U pinch points");
       return;
     }
-    _bbs.map(pinch->_idx,b);      // Pretend it's valid in this block (lazy init)
+    _cfg->map_node_to_block(pinch, b);      // Pretend it's valid in this block (lazy init)
     _reg_node.map(def_reg,pinch); // Record pinch-point
     //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
     if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
@@ -2713,9 +2712,9 @@
     return;
   Node *pinch = _reg_node[use_reg]; // Get pinch point
   // Check for no later def_reg/kill in block
-  if( pinch && _bbs[pinch->_idx] == b &&
+  if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
       // Use has to be block-local as well
-      _bbs[use->_idx] == b ) {
+      _cfg->get_block_for_node(use) == b) {
     if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
@@ -2895,7 +2894,7 @@
     int trace_cnt = 0;
     for (uint k = 0; k < _reg_node.Size(); k++) {
       Node* pinch = _reg_node[k];
-      if (pinch != NULL && pinch->Opcode() == Op_Node &&
+      if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
          // no precedence input edges
           (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
         cleanup_pinch(pinch);
--- a/hotspot/src/share/vm/opto/output.hpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/output.hpp	Wed Aug 07 17:56:19 2013 +0200
@@ -96,9 +96,6 @@
   // List of nodes currently available for choosing for scheduling
   Node_List _available;
 
-  // Mapping from node (index) to basic block
-  Block_Array& _bbs;
-
   // For each instruction beginning a bundle, the number of following
   // nodes to be bundled with it.
   Bundle *_node_bundling_base;
--- a/hotspot/src/share/vm/opto/postaloc.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/postaloc.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -78,11 +78,13 @@
 // Helper function for yank_if_dead
 int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
   int blk_adjust=0;
-  Block *oldb = _cfg._bbs[old->_idx];
+  Block *oldb = _cfg.get_block_for_node(old);
   oldb->find_remove(old);
   // Count 1 if deleting an instruction from the current block
-  if( oldb == current_block ) blk_adjust++;
-  _cfg._bbs.map(old->_idx,NULL);
+  if (oldb == current_block) {
+    blk_adjust++;
+  }
+  _cfg.unmap_node_from_block(old);
   OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
   if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
     value->map(old_reg,NULL);  // Yank from value/regnd maps
@@ -433,7 +435,7 @@
     bool missing_some_inputs = false;
     Block *freed = NULL;
     for( j = 1; j < b->num_preds(); j++ ) {
-      Block *pb = _cfg._bbs[b->pred(j)->_idx];
+      Block *pb = _cfg.get_block_for_node(b->pred(j));
       // Remove copies along phi edges
       for( uint k=1; k<phi_dex; k++ )
         elide_copy( b->_nodes[k], j, b, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false );
@@ -478,7 +480,7 @@
     } else {
       if( !freed ) {            // Didn't get a freebie prior block
         // Must clone some data
-        freed = _cfg._bbs[b->pred(1)->_idx];
+        freed = _cfg.get_block_for_node(b->pred(1));
         Node_List &f_value = *blk2value[freed->_pre_order];
         Node_List &f_regnd = *blk2regnd[freed->_pre_order];
         for( uint k = 0; k < (uint)_max_reg; k++ ) {
@@ -488,7 +490,7 @@
       }
       // Merge all inputs together, setting to NULL any conflicts.
       for( j = 1; j < b->num_preds(); j++ ) {
-        Block *pb = _cfg._bbs[b->pred(j)->_idx];
+        Block *pb = _cfg.get_block_for_node(b->pred(j));
         if( pb == freed ) continue; // Did self already via freelist
         Node_List &p_regnd = *blk2regnd[pb->_pre_order];
         for( uint k = 0; k < (uint)_max_reg; k++ ) {
@@ -515,8 +517,9 @@
           u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
       }
       if( u != NodeSentinel ) {    // Junk Phi.  Remove
-        b->_nodes.remove(j--); phi_dex--;
-        _cfg._bbs.map(phi->_idx,NULL);
+        b->_nodes.remove(j--);
+        phi_dex--;
+        _cfg.unmap_node_from_block(phi);
         phi->replace_by(u);
         phi->disconnect_inputs(NULL, C);
         continue;
--- a/hotspot/src/share/vm/opto/reg_split.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/opto/reg_split.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -132,7 +132,7 @@
   }
 
   b->_nodes.insert(i,spill);    // Insert node in block
-  _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect
+  _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
   // Adjust the point where we go hi-pressure
   if( i <= b->_ihrp_index ) b->_ihrp_index++;
   if( i <= b->_fhrp_index ) b->_fhrp_index++;
@@ -219,7 +219,7 @@
         use->set_req(useidx, def);
       } else {
         // Block and index where the use occurs.
-        Block *b = _cfg._bbs[use->_idx];
+        Block *b = _cfg.get_block_for_node(use);
         // Put the clone just prior to use
         int bindex = b->find_node(use);
         // DEF is UP, so must copy it DOWN and hook in USE
@@ -270,7 +270,7 @@
   int bindex;
   // Phi input spill-copys belong at the end of the prior block
   if( use->is_Phi() ) {
-    b = _cfg._bbs[b->pred(useidx)->_idx];
+    b = _cfg.get_block_for_node(b->pred(useidx));
     bindex = b->end_idx();
   } else {
     // Put the clone just prior to use
@@ -335,7 +335,7 @@
         continue;
       }
 
-      Block *b_def = _cfg._bbs[def->_idx];
+      Block *b_def = _cfg.get_block_for_node(def);
       int idx_def = b_def->find_node(def);
       Node *in_spill = get_spillcopy_wide( in, def, i );
       if( !in_spill ) return 0; // Bailed out
@@ -589,7 +589,7 @@
         UPblock[slidx] = true;
         // Record following instruction in case 'n' rematerializes and
         // kills flags
-        Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
+        Block *pred1 = _cfg.get_block_for_node(b->pred(1));
         continue;
       }
 
@@ -601,7 +601,7 @@
       // Grab predecessor block header
       n1 = b->pred(1);
       // Grab the appropriate reaching def info for inpidx
-      pred = _cfg._bbs[n1->_idx];
+      pred = _cfg.get_block_for_node(n1);
       pidx = pred->_pre_order;
       Node **Ltmp = Reaches[pidx];
       bool  *Utmp = UP[pidx];
@@ -616,7 +616,7 @@
         // Grab predecessor block headers
         n2 = b->pred(inpidx);
         // Grab the appropriate reaching def info for inpidx
-        pred = _cfg._bbs[n2->_idx];
+        pred = _cfg.get_block_for_node(n2);
         pidx = pred->_pre_order;
         Ltmp = Reaches[pidx];
         Utmp = UP[pidx];
@@ -701,7 +701,7 @@
         // Grab predecessor block header
         n1 = b->pred(1);
         // Grab the appropriate reaching def info for k
-        pred = _cfg._bbs[n1->_idx];
+        pred = _cfg.get_block_for_node(n1);
         pidx = pred->_pre_order;
         Node **Ltmp = Reaches[pidx];
         bool  *Utmp = UP[pidx];
@@ -919,7 +919,7 @@
                 return 0;
               }
               _lrg_map.extend(def->_idx, 0);
-              _cfg._bbs.map(def->_idx,b);
+              _cfg.map_node_to_block(def, b);
               n->set_req(inpidx, def);
               continue;
             }
@@ -1291,7 +1291,7 @@
   for( insidx = 0; insidx < phis->size(); insidx++ ) {
     Node *phi = phis->at(insidx);
     assert(phi->is_Phi(),"This list must only contain Phi Nodes");
-    Block *b = _cfg._bbs[phi->_idx];
+    Block *b = _cfg.get_block_for_node(phi);
     // Grab the live range number
     uint lidx = _lrg_map.find_id(phi);
     uint slidx = lrg2reach[lidx];
@@ -1315,7 +1315,7 @@
     // DEF has the wrong UP/DOWN value.
     for( uint i = 1; i < b->num_preds(); i++ ) {
       // Get predecessor block pre-order number
-      Block *pred = _cfg._bbs[b->pred(i)->_idx];
+      Block *pred = _cfg.get_block_for_node(b->pred(i));
       pidx = pred->_pre_order;
       // Grab reaching def
       Node *def = Reaches[pidx][slidx];
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Mon Aug 05 15:03:40 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Aug 07 17:56:19 2013 +0200
@@ -1098,7 +1098,7 @@
                                                                                                                                      \
   c2_nonstatic_field(PhaseCFG,           _num_blocks,              uint)                                                             \
   c2_nonstatic_field(PhaseCFG,           _blocks,                  Block_List)                                                       \
-  c2_nonstatic_field(PhaseCFG,           _bbs,                     Block_Array)                                                      \
+  c2_nonstatic_field(PhaseCFG,           _node_to_block_mapping,   Block_Array)                                                      \
   c2_nonstatic_field(PhaseCFG,           _broot,                   Block*)                                                           \
                                                                                                                                      \
   c2_nonstatic_field(PhaseRegAlloc,      _node_regs,               OptoRegPair*)                                                     \
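
For completeness, a short usage sketch against the hypothetical PhaseCFGModel from the sketch above, mirroring the rewritten call sites where scheduling, hoisting and yanking a node always go through the mapping accessors rather than the raw array. This is illustration only, not HotSpot code.

#include <cassert>
#include <cstddef>

// Exercises the PhaseCFGModel sketch: map a node into a block, "hoist" it
// into another block, then drop the mapping when the node is removed.
int main() {
  Block b1 = { 1 };
  Block b2 = { 2 };
  Node  n  = { 42 };

  PhaseCFGModel cfg;
  cfg.map_node_to_block(&n, &b1);               // schedule n into b1
  assert(cfg.get_block_for_node(&n) == &b1);

  cfg.map_node_to_block(&n, &b2);               // hoist n into b2
  assert(cfg.get_block_for_node(&n) == &b2);

  cfg.unmap_node_from_block(&n);                // n no longer in any block
  assert(cfg.get_block_for_node(&n) == NULL);
  return 0;
}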