Merge
author	rbackman
Mon, 02 Sep 2013 13:13:45 +0200
changeset 19723 79c3f91dc0fa
parent 19678 dee3b9716682 (current diff)
parent 19722 e8e0031ea96d (diff)
child 19724 1e308c6b0de3
Merge
hotspot/src/share/vm/runtime/thread.cpp
--- a/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -307,7 +307,7 @@
       assert(a_byte == *start++, "should be the same code");
     }
 #endif
-  } else if (_id == load_mirror_id) {
+  } else if (_id == load_mirror_id || _id == load_appendix_id) {
     // produce a copy of the load mirror instruction for use by the being initialized case
 #ifdef ASSERT
     address start = __ pc();
@@ -384,6 +384,7 @@
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -397,7 +398,7 @@
   ce->add_call_info_here(_info);
   __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
   __ delayed()->nop();
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     address pc = (address)_pc_start;
     RelocIterator iter(cs, pc, pc + 1);
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -520,7 +520,7 @@
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
   // Allocate a new index in table to hold the object once it's been patched
   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
 
   AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -804,6 +804,12 @@
       }
       break;
 
+    case load_appendix_patching_id:
+      { __ set_info("load_appendix_patching", dont_gc_arguments);
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -402,6 +402,7 @@
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id:      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -419,7 +420,7 @@
   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
     __ nop();
   }
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
     relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -362,7 +362,7 @@
 
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
   jobject o = NULL;
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
   __ movoop(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1499,6 +1499,13 @@
       }
       break;
 
+    case load_appendix_patching_id:
+      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
+        // we should set up register map
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
--- a/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Mon Sep 02 13:13:45 2013 +0200
@@ -106,10 +106,12 @@
                         " (" + getMethod().getBytes() + " bytes) " + getReason());
             }
         }
+        stream.printf(" (end time: %6.4f", getTimeStamp());
         if (getEndNodes() > 0) {
-            stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
+            stream.printf(" nodes: %d live: %d", getEndNodes(), getEndLiveNodes());
         }
-        stream.println("");
+        stream.println(")");
+
         if (getReceiver() != null) {
             emit(stream, indent + 4);
             //                 stream.println("type profile " + method.holder + " -> " + receiver + " (" +
--- a/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Mon Sep 02 13:13:45 2013 +0200
@@ -207,7 +207,12 @@
     }
 
     String search(Attributes attr, String name) {
-        return search(attr, name, null);
+        String result = attr.getValue(name);
+        if (result != null) {
+            return result;
+        } else {
+            throw new InternalError("can't find " + name);
+        }
     }
 
     String search(Attributes attr, String name, String defaultValue) {
@@ -215,13 +220,7 @@
         if (result != null) {
             return result;
         }
-        if (defaultValue != null) {
-            return defaultValue;
-        }
-        for (int i = 0; i < attr.getLength(); i++) {
-            System.out.println(attr.getQName(i) + " " + attr.getValue(attr.getQName(i)));
-        }
-        throw new InternalError("can't find " + name);
+        return defaultValue;
     }
     int indent = 0;
 
@@ -268,17 +267,18 @@
             Phase p = new Phase(search(atts, "name"),
                     Double.parseDouble(search(atts, "stamp")),
                     Integer.parseInt(search(atts, "nodes", "0")),
-                    Integer.parseInt(search(atts, "live")));
+                    Integer.parseInt(search(atts, "live", "0")));
             phaseStack.push(p);
         } else if (qname.equals("phase_done")) {
             Phase p = phaseStack.pop();
-            if (! p.getId().equals(search(atts, "name"))) {
+            String phaseName = search(atts, "name", null);
+            if (phaseName != null && !p.getId().equals(phaseName)) {
                 System.out.println("phase: " + p.getId());
                 throw new InternalError("phase name mismatch");
             }
             p.setEnd(Double.parseDouble(search(atts, "stamp")));
             p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
-            p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
+            p.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
             compile.getPhases().add(p);
         } else if (qname.equals("task")) {
             compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
@@ -413,8 +413,8 @@
             }
         } else if (qname.equals("parse_done")) {
             CallSite call = scopes.pop();
-            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
-            call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
+            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
+            call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
             call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
             scopes.push(call);
         }
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1095,7 +1095,7 @@
         fprintf(fp, "  // Identify previous instruction if inside this block\n");
         fprintf(fp, "  if( ");
         print_block_index(fp, inst_position);
-        fprintf(fp, " > 0 ) {\n    Node *n = block->_nodes.at(");
+        fprintf(fp, " > 0 ) {\n    Node *n = block->get_node(");
         print_block_index(fp, inst_position);
         fprintf(fp, ");\n    inst%d = (n->is_Mach()) ? ", inst_position);
         fprintf(fp, "n->as_Mach() : NULL;\n  }\n");
--- a/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -364,7 +364,8 @@
   enum PatchID {
     access_field_id,
     load_klass_id,
-    load_mirror_id
+    load_mirror_id,
+    load_appendix_id
   };
   enum constants {
     patch_info_size = 3
@@ -417,7 +418,7 @@
       }
       NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
       n_move->set_offset(field_offset);
-    } else if (_id == load_klass_id || _id == load_mirror_id) {
+    } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
       assert(_obj != noreg, "must have register object for load_klass/load_mirror");
 #ifdef ASSERT
       // verify that we're pointing at a NativeMovConstReg
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -74,16 +74,19 @@
  private:
   JavaThread* _thread;
   CompileLog* _log;
+  TimerName _timer;
 
  public:
   PhaseTraceTime(TimerName timer)
-  : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
+  : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose),
+    _log(NULL), _timer(timer)
+  {
     if (Compilation::current() != NULL) {
       _log = Compilation::current()->log();
     }
 
     if (_log != NULL) {
-      _log->begin_head("phase name='%s'", timer_name[timer]);
+      _log->begin_head("phase name='%s'", timer_name[_timer]);
       _log->stamp();
       _log->end_head();
     }
@@ -91,7 +94,7 @@
 
   ~PhaseTraceTime() {
     if (_log != NULL)
-      _log->done("phase");
+      _log->done("phase name='%s'", timer_name[_timer]);
   }
 };
 
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1583,7 +1583,7 @@
       ObjectType* obj_type = obj->type()->as_ObjectType();
       if (obj_type->is_constant() && !PatchALot) {
         ciObject* const_oop = obj_type->constant_value();
-        if (!const_oop->is_null_object()) {
+        if (!const_oop->is_null_object() && const_oop->is_loaded()) {
           if (field->is_constant()) {
             ciConstant field_val = field->constant_value_of(const_oop);
             BasicType field_type = field_val.basic_type();
@@ -1667,9 +1667,8 @@
   const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
   assert(declared_signature != NULL, "cannot be null");
 
-  // FIXME bail out for now
-  if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
-    BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
+  if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
+    BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
   }
 
   // we have to make sure the argument size (incl. the receiver)
@@ -1713,10 +1712,23 @@
       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
       break;
     }
+  } else {
+    if (bc_raw == Bytecodes::_invokehandle) {
+      assert(!will_link, "should come here only for unlinked call");
+      code = Bytecodes::_invokespecial;
+    }
   }
 
   // Push appendix argument (MethodType, CallSite, etc.), if one.
-  if (stream()->has_appendix()) {
+  bool patch_for_appendix = false;
+  int patching_appendix_arg = 0;
+  if (C1PatchInvokeDynamic &&
+      (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
+    Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
+    apush(arg);
+    patch_for_appendix = true;
+    patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
+  } else if (stream()->has_appendix()) {
     ciObject* appendix = stream()->get_appendix();
     Value arg = append(new Constant(new ObjectConstant(appendix)));
     apush(arg);
@@ -1732,7 +1744,8 @@
   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
       !(// %%% FIXME: Are both of these relevant?
         target->is_method_handle_intrinsic() ||
-        target->is_compiled_lambda_form())) {
+        target->is_compiled_lambda_form()) &&
+      !patch_for_appendix) {
     Value receiver = NULL;
     ciInstanceKlass* receiver_klass = NULL;
     bool type_is_exact = false;
@@ -1850,7 +1863,8 @@
   // check if we could do inlining
   if (!PatchALot && Inline && klass->is_loaded() &&
       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
-      && target->is_loaded()) {
+      && target->is_loaded()
+      && !patch_for_appendix) {
     // callee is known => check if we have static binding
     assert(target->is_loaded(), "callee must be known");
     if (code == Bytecodes::_invokestatic  ||
@@ -1901,7 +1915,7 @@
     code == Bytecodes::_invokespecial   ||
     code == Bytecodes::_invokevirtual   ||
     code == Bytecodes::_invokeinterface;
-  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+  Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
   Value recv = has_receiver ? apop() : NULL;
   int vtable_index = Method::invalid_vtable_index;
 
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1211,8 +1211,6 @@
   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
   bool is_method_handle_invoke() const {
     return
-      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
-      ||
       method()->is_compiled_lambda_form()  // Java-generated adapter
       ||
       method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -93,12 +93,23 @@
       default:
         ShouldNotReachHere();
     }
+  } else if (patch->id() == PatchingStub::load_appendix_id) {
+    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
+    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
   } else {
     ShouldNotReachHere();
   }
 #endif
 }
 
+PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
+  IRScope* scope = info->scope();
+  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
+  if (Bytecodes::has_optional_appendix(bc_raw)) {
+    return PatchingStub::load_appendix_id;
+  }
+  return PatchingStub::load_mirror_id;
+}
 
 //---------------------------------------------------------------
 
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -119,6 +119,8 @@
 
   void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
 
+  PatchingStub::PatchID patching_id(CodeEmitInfo* info);
+
  public:
   LIR_Assembler(Compilation* c);
   ~LIR_Assembler();
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -819,6 +819,7 @@
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
+  Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 
@@ -888,10 +889,32 @@
           mirror = Handle(THREAD, m);
         }
         break;
-      default: Unimplemented();
+      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
     }
     // convert to handle
     load_klass = KlassHandle(THREAD, k);
+  } else if (stub_id == load_appendix_patching_id) {
+    Bytecode_invoke bytecode(caller_method, bci);
+    Bytecodes::Code bc = bytecode.invoke_code();
+
+    CallInfo info;
+    constantPoolHandle pool(thread, caller_method->constants());
+    int index = bytecode.index();
+    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+    appendix = info.resolved_appendix();
+    switch (bc) {
+      case Bytecodes::_invokehandle: {
+        int cache_index = ConstantPool::decode_cpcache_index(index, true);
+        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+        break;
+      }
+      case Bytecodes::_invokedynamic: {
+        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+        break;
+      }
+      default: fatal("unexpected bytecode for load_appendix_patching_id");
+    }
   } else {
     ShouldNotReachHere();
   }
@@ -992,8 +1015,8 @@
                    n_copy->data() == (intptr_t)Universe::non_oop_word(),
                    "illegal init value");
             if (stub_id == Runtime1::load_klass_patching_id) {
-            assert(load_klass() != NULL, "klass not set");
-            n_copy->set_data((intx) (load_klass()));
+              assert(load_klass() != NULL, "klass not set");
+              n_copy->set_data((intx) (load_klass()));
             } else {
               assert(mirror() != NULL, "klass not set");
               n_copy->set_data((intx) (mirror()));
@@ -1002,43 +1025,55 @@
             if (TracePatching) {
               Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
             }
+          }
+        } else if (stub_id == Runtime1::load_appendix_patching_id) {
+          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+          assert(n_copy->data() == 0 ||
+                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
+                 "illegal init value");
+          n_copy->set_data((intx) (appendix()));
 
-#if defined(SPARC) || defined(PPC)
-            // Update the location in the nmethod with the proper
-            // metadata.  When the code was generated, a NULL was stuffed
-            // in the metadata table and that table needs to be update to
-            // have the right value.  On intel the value is kept
-            // directly in the instruction instead of in the metadata
-            // table, so set_data above effectively updated the value.
-            nmethod* nm = CodeCache::find_nmethod(instr_pc);
-            assert(nm != NULL, "invalid nmethod_pc");
-            RelocIterator mds(nm, copy_buff, copy_buff + 1);
-            bool found = false;
-            while (mds.next() && !found) {
-              if (mds.type() == relocInfo::oop_type) {
-                assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-                oop_Relocation* r = mds.oop_reloc();
-                oop* oop_adr = r->oop_addr();
-                *oop_adr = mirror();
-                r->fix_oop_relocation();
-                found = true;
-              } else if (mds.type() == relocInfo::metadata_type) {
-                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-                metadata_Relocation* r = mds.metadata_reloc();
-                Metadata** metadata_adr = r->metadata_addr();
-                *metadata_adr = load_klass();
-                r->fix_metadata_relocation();
-                found = true;
-              }
-            }
-            assert(found, "the metadata must exist!");
-#endif
-
+          if (TracePatching) {
+            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
           }
         } else {
           ShouldNotReachHere();
         }
 
+#if defined(SPARC) || defined(PPC)
+        if (load_klass_or_mirror_patch_id ||
+            stub_id == Runtime1::load_appendix_patching_id) {
+          // Update the location in the nmethod with the proper
+          // metadata.  When the code was generated, a NULL was stuffed
+          // in the metadata table and that table needs to be update to
+          // have the right value.  On intel the value is kept
+          // directly in the instruction instead of in the metadata
+          // table, so set_data above effectively updated the value.
+          nmethod* nm = CodeCache::find_nmethod(instr_pc);
+          assert(nm != NULL, "invalid nmethod_pc");
+          RelocIterator mds(nm, copy_buff, copy_buff + 1);
+          bool found = false;
+          while (mds.next() && !found) {
+            if (mds.type() == relocInfo::oop_type) {
+              assert(stub_id == Runtime1::load_mirror_patching_id ||
+                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+              oop_Relocation* r = mds.oop_reloc();
+              oop* oop_adr = r->oop_addr();
+              *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+              r->fix_oop_relocation();
+              found = true;
+            } else if (mds.type() == relocInfo::metadata_type) {
+              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+              metadata_Relocation* r = mds.metadata_reloc();
+              Metadata** metadata_adr = r->metadata_addr();
+              *metadata_adr = load_klass();
+              r->fix_metadata_relocation();
+              found = true;
+            }
+          }
+          assert(found, "the metadata must exist!");
+        }
+#endif
         if (do_patch) {
           // replace instructions
           // first replace the tail, then the call
@@ -1077,7 +1112,8 @@
           ICache::invalidate_range(instr_pc, *byte_count);
           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
 
-          if (load_klass_or_mirror_patch_id) {
+          if (load_klass_or_mirror_patch_id ||
+              stub_id == Runtime1::load_appendix_patching_id) {
             relocInfo::relocType rtype =
               (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
@@ -1118,7 +1154,8 @@
 
   // If we are patching in a non-perm oop, make sure the nmethod
   // is on the right list.
-  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+                              (appendix.not_null() && appendix->is_scavengable()))) {
     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
@@ -1179,6 +1216,24 @@
   return caller_is_deopted();
 }
 
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+  Thread* THREAD = thread;
+  debug_only(NoHandleMark nhm;)
+  {
+    // Enter VM mode
+
+    ResetNoHandleMark rnhm;
+    patch_code(thread, load_appendix_patching_id);
+  }
+  // Back in JAVA, use no oops DON'T safepoint
+
+  // Return true if calling code is deoptimized
+
+  return caller_is_deopted();
+}
 //
 // Entry point for compiled code. We want to patch a nmethod.
 // We don't do a normal VM transition here because we want to
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -67,6 +67,7 @@
   stub(access_field_patching)        \
   stub(load_klass_patching)          \
   stub(load_mirror_patching)         \
+  stub(load_appendix_patching)       \
   stub(g1_pre_barrier_slow)          \
   stub(g1_post_barrier_slow)         \
   stub(fpu2long_stub)                \
@@ -160,6 +161,7 @@
   static int access_field_patching(JavaThread* thread);
   static int move_klass_patching(JavaThread* thread);
   static int move_mirror_patching(JavaThread* thread);
+  static int move_appendix_patching(JavaThread* thread);
 
   static void patch_code(JavaThread* thread, StubID stub_id);
 
--- a/hotspot/src/share/vm/c1/c1_globals.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_globals.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -25,4 +25,4 @@
 #include "precompiled.hpp"
 #include "c1/c1_globals.hpp"
 
-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- a/hotspot/src/share/vm/c1/c1_globals.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -54,7 +54,7 @@
 //
 // Defines all global flags used by the client compiler.
 //
-#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
                                                                             \
   /* Printing */                                                            \
   notproduct(bool, PrintC1Statistics, false,                                \
@@ -333,15 +333,19 @@
           "Use CHA and exact type results at call sites when updating MDOs")\
                                                                             \
   product(bool, C1UpdateMethodData, trueInTiered,                           \
-          "Update MethodData*s in Tier1-generated code")                  \
+          "Update MethodData*s in Tier1-generated code")                    \
                                                                             \
   develop(bool, PrintCFGToFile, false,                                      \
           "print control flow graph to a separate file during compilation") \
                                                                             \
+  diagnostic(bool, C1PatchInvokeDynamic, true,                              \
+             "Patch invokedynamic appendix not known at compile time")      \
+                                                                            \
+                                                                            \
 
 
 // Read default values for c1 globals
 
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
 
 #endif // SHARE_VM_C1_C1_GLOBALS_HPP
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1150,6 +1150,10 @@
   record_method_not_compilable("out of memory");
 }
 
+ciInstance* ciEnv::unloaded_ciinstance() {
+  GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
+}
+
 void ciEnv::dump_replay_data(outputStream* out) {
   VM_ENTRY_MARK;
   MutexLocker ml(Compile_lock);
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -400,6 +400,7 @@
   static ciInstanceKlass* unloaded_ciinstance_klass() {
     return _unloaded_ciinstance_klass;
   }
+  ciInstance* unloaded_ciinstance();
 
   ciKlass*  find_system_klass(ciSymbol* klass_name);
   // Note:  To find a class from its name string, use ciSymbol::make,
--- a/hotspot/src/share/vm/ci/ciInstance.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciInstance.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -60,10 +60,10 @@
 //
 // Constant value of a field.
 ciConstant ciInstance::field_value(ciField* field) {
-  assert(is_loaded() &&
-         field->holder()->is_loaded() &&
-         klass()->is_subclass_of(field->holder()),
-         "invalid access");
+  assert(is_loaded(), "invalid access - must be loaded");
+  assert(field->holder()->is_loaded(), "invalid access - holder must be loaded");
+  assert(klass()->is_subclass_of(field->holder()), "invalid access - must be subclass");
+
   VM_ENTRY_MARK;
   ciConstant result;
   Handle obj = get_oop();
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -177,6 +177,10 @@
     address bcp = code() + bci;
     return Bytecodes::java_code_at(NULL, bcp);
   }
+  Bytecodes::Code raw_code_at_bci(int bci) {
+    address bcp = code() + bci;
+    return Bytecodes::code_at(NULL, bcp);
+  }
   BCEscapeAnalyzer  *get_bcea();
   ciMethodBlocks    *get_method_blocks();
 
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -563,7 +563,10 @@
   return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
 }
 
-
+ciInstance* ciObjectFactory::get_unloaded_object_constant() {
+  if (ciEnv::_Object_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
+}
 
 //------------------------------------------------------------------
 // ciObjectFactory::get_empty_methodData
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -131,6 +131,8 @@
   ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
 
 
+  ciInstance* get_unloaded_object_constant();
+
   // Get the ciMethodData representing the methodData for a method
   // with none.
   ciMethodData* get_empty_methodData();
--- a/hotspot/src/share/vm/code/nmethod.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -93,18 +93,21 @@
 #endif
 
 bool nmethod::is_compiled_by_c1() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_c1();
 }
 bool nmethod::is_compiled_by_c2() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_c2();
 }
 bool nmethod::is_compiled_by_shark() const {
-  if (is_native_method()) return false;
-  assert(compiler() != NULL, "must be");
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_shark();
 }
 
@@ -1401,6 +1404,9 @@
     // nmethods aren't scanned for GC.
     _oops_are_stale = true;
 #endif
+     // the Method may be reclaimed by class unloading now that the
+     // nmethod is in zombie state
+    set_method(NULL);
   } else {
     assert(state == not_entrant, "other cases may need to be handled differently");
   }
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1718,7 +1718,7 @@
     CodeCache::print_summary(&s, detailed);
   }
   ttyLocker ttyl;
-  tty->print_cr(s.as_string());
+  tty->print(s.as_string());
 }
 
 // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/oops/method.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -720,11 +720,22 @@
   }
 }
 
+bool Method::is_always_compilable() const {
+  // Generated adapters must be compiled
+  if (is_method_handle_intrinsic() && is_synthetic()) {
+    assert(!is_not_c1_compilable(), "sanity check");
+    assert(!is_not_c2_compilable(), "sanity check");
+    return true;
+  }
+
+  return false;
+}
+
 bool Method::is_not_compilable(int comp_level) const {
   if (number_of_breakpoints() > 0)
     return true;
-  if (is_method_handle_intrinsic())
-    return !is_synthetic();  // the generated adapters must be compiled
+  if (is_always_compilable())
+    return false;
   if (comp_level == CompLevel_any)
     return is_not_c1_compilable() || is_not_c2_compilable();
   if (is_c1_compile(comp_level))
@@ -736,6 +747,10 @@
 
 // call this when compiler finds that this method is not compilable
 void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
+  if (is_always_compilable()) {
+    // Don't mark a method which should be always compilable
+    return;
+  }
   print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
   if (comp_level == CompLevel_all) {
     set_not_c1_compilable();
--- a/hotspot/src/share/vm/oops/method.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -796,6 +796,7 @@
   void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_osr_compilable(comp_level, false);
   }
+  bool is_always_compilable() const;
 
  private:
   void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
--- a/hotspot/src/share/vm/opto/block.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/block.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -112,9 +112,9 @@
 // exceeds OptoLoopAlignment.
 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                     PhaseRegAlloc* ra) {
-  uint last_inst = _nodes.size();
+  uint last_inst = number_of_nodes();
   for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
-    uint inst_size = _nodes[j]->size(ra);
+    uint inst_size = get_node(j)->size(ra);
     if( inst_size > 0 ) {
       inst_cnt--;
       uint sz = sum_size + inst_size;
@@ -131,8 +131,8 @@
 }
 
 uint Block::find_node( const Node *n ) const {
-  for( uint i = 0; i < _nodes.size(); i++ ) {
-    if( _nodes[i] == n )
+  for( uint i = 0; i < number_of_nodes(); i++ ) {
+    if( get_node(i) == n )
       return i;
   }
   ShouldNotReachHere();
@@ -141,7 +141,7 @@
 
 // Find and remove n from block list
 void Block::find_remove( const Node *n ) {
-  _nodes.remove(find_node(n));
+  remove_node(find_node(n));
 }
 
 // Return empty status of a block.  Empty blocks contain only the head, other
@@ -154,10 +154,10 @@
   }
 
   int success_result = completely_empty;
-  int end_idx = _nodes.size()-1;
+  int end_idx = number_of_nodes() - 1;
 
   // Check for ending goto
-  if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
+  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
     success_result = empty_with_goto;
     end_idx--;
   }
@@ -170,7 +170,7 @@
   // Ideal nodes are allowable in empty blocks: skip them  Only MachNodes
   // turn directly into code, because only MachNodes have non-trivial
   // emit() functions.
-  while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
+  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
     end_idx--;
   }
 
@@ -209,15 +209,15 @@
 
 // True if block is low enough frequency or guarded by a test which
 // mostly does not go here.
-bool Block::is_uncommon(PhaseCFG* cfg) const {
+bool PhaseCFG::is_uncommon(const Block* block) {
   // Initial blocks must never be moved, so are never uncommon.
-  if (head()->is_Root() || head()->is_Start())  return false;
+  if (block->head()->is_Root() || block->head()->is_Start())  return false;
 
   // Check for way-low freq
-  if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
+  if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;
 
   // Look for code shape indicating uncommon_trap or slow path
-  if (has_uncommon_code()) return true;
+  if (block->has_uncommon_code()) return true;
 
   const float epsilon = 0.05f;
   const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
@@ -225,8 +225,8 @@
   uint freq_preds = 0;
   uint uncommon_for_freq_preds = 0;
 
-  for( uint i=1; i<num_preds(); i++ ) {
-    Block* guard = cfg->get_block_for_node(pred(i));
+  for( uint i=1; i< block->num_preds(); i++ ) {
+    Block* guard = get_block_for_node(block->pred(i));
     // Check to see if this block follows its guard 1 time out of 10000
     // or less.
     //
@@ -244,14 +244,14 @@
       uncommon_preds++;
     } else {
       freq_preds++;
-      if( _freq < guard->_freq * guard_factor ) {
+      if(block->_freq < guard->_freq * guard_factor ) {
         uncommon_for_freq_preds++;
       }
     }
   }
-  if( num_preds() > 1 &&
+  if( block->num_preds() > 1 &&
       // The block is uncommon if all preds are uncommon or
-      (uncommon_preds == (num_preds()-1) ||
+      (uncommon_preds == (block->num_preds()-1) ||
       // it is uncommon for all frequent preds.
        uncommon_for_freq_preds == freq_preds) ) {
     return true;
@@ -344,8 +344,8 @@
 
 void Block::dump(const PhaseCFG* cfg) const {
   dump_head(cfg);
-  for (uint i=0; i< _nodes.size(); i++) {
-    _nodes[i]->dump();
+  for (uint i=0; i< number_of_nodes(); i++) {
+    get_node(i)->dump();
   }
   tty->print("\n");
 }
@@ -434,7 +434,7 @@
       map_node_to_block(p, bb);
       map_node_to_block(x, bb);
       if( x != p ) {                // Only for root is x == p
-        bb->_nodes.push((Node*)x);
+        bb->push_node((Node*)x);
       }
       // Now handle predecessors
       ++sum;                        // Count 1 for self block
@@ -469,11 +469,11 @@
         assert( x != proj, "" );
         // Map basic block of projection
         map_node_to_block(proj, pb);
-        pb->_nodes.push(proj);
+        pb->push_node(proj);
       }
       // Insert self as a child of my predecessor block
       pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
-      assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
+      assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
               "too many control users, not a CFG?" );
     }
   }
@@ -495,7 +495,7 @@
   // surrounding blocks.
   float freq = in->_freq * in->succ_prob(succ_no);
   // get ProjNode corresponding to the succ_no'th successor of the in block
-  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
+  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
   // create region for basic block
   RegionNode* region = new (C) RegionNode(2);
   region->init_req(1, proj);
@@ -507,7 +507,7 @@
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, region);
   // add it to the basic block
-  block->_nodes.push(gto);
+  block->push_node(gto);
   map_node_to_block(gto, block);
   C->regalloc()->set_bad(gto->_idx);
   // hook up successor block
@@ -527,9 +527,9 @@
 // Does this block end in a multiway branch that cannot have the default case
 // flipped for another case?
 static bool no_flip_branch( Block *b ) {
-  int branch_idx = b->_nodes.size() - b->_num_succs-1;
+  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
   if( branch_idx < 1 ) return false;
-  Node *bra = b->_nodes[branch_idx];
+  Node *bra = b->get_node(branch_idx);
   if( bra->is_Catch() )
     return true;
   if( bra->is_Mach() ) {
@@ -550,16 +550,16 @@
 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
   // Find true target
   int end_idx = b->end_idx();
-  int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
+  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
   Block *succ = b->_succs[idx];
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, b->head());
-  Node *bp = b->_nodes[end_idx];
-  b->_nodes.map(end_idx,gto); // Slam over NeverBranch
+  Node *bp = b->get_node(end_idx);
+  b->map_node(gto, end_idx); // Slam over NeverBranch
   map_node_to_block(gto, b);
   C->regalloc()->set_bad(gto->_idx);
-  b->_nodes.pop();              // Yank projections
-  b->_nodes.pop();              // Yank projections
+  b->pop_node();              // Yank projections
+  b->pop_node();              // Yank projections
   b->_succs.map(0,succ);        // Map only successor
   b->_num_succs = 1;
   // remap successor's predecessors if necessary
@@ -575,8 +575,8 @@
   // Scan through block, yanking dead path from
   // all regions and phis.
   dead->head()->del_req(j);
-  for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
-    dead->_nodes[k]->del_req(j);
+  for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
+    dead->get_node(k)->del_req(j);
 }
 
 // Helper function to move block bx to the slot following b_index. Return
@@ -620,7 +620,7 @@
   if (e != Block::not_empty) {
     if (e == Block::empty_with_goto) {
       // Remove the goto, but leave the block.
-      b->_nodes.pop();
+      b->pop_node();
     }
     // Mark this block as a connector block, which will cause it to be
     // ignored in certain functions such as non_connector_successor().
@@ -663,13 +663,13 @@
     // to give a fake exit path to infinite loops.  At this late stage they
     // need to turn into Goto's so that when you enter the infinite loop you
     // indeed hang.
-    if (block->_nodes[block->end_idx()]->Opcode() == Op_NeverBranch) {
+    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
       convert_NeverBranch_to_Goto(block);
     }
 
     // Look for uncommon blocks and move to end.
     if (!C->do_freq_based_layout()) {
-      if (block->is_uncommon(this)) {
+      if (is_uncommon(block)) {
         move_to_end(block, i);
         last--;                   // No longer check for being uncommon!
         if (no_flip_branch(block)) { // Fall-thru case must follow?
@@ -720,9 +720,9 @@
     // exchange the true and false targets.
     if (no_flip_branch(block)) {
       // Find fall through case - if must fall into its target
-      int branch_idx = block->_nodes.size() - block->_num_succs;
+      int branch_idx = block->number_of_nodes() - block->_num_succs;
       for (uint j2 = 0; j2 < block->_num_succs; j2++) {
-        const ProjNode* p = block->_nodes[branch_idx + j2]->as_Proj();
+        const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
         if (p->_con == 0) {
           // successor j2 is fall through case
           if (block->non_connector_successor(j2) != bnext) {
@@ -743,14 +743,14 @@
 
       // Remove all CatchProjs
       for (uint j = 0; j < block->_num_succs; j++) {
-        block->_nodes.pop();
+        block->pop_node();
       }
 
     } else if (block->_num_succs == 1) {
       // Block ends in a Goto?
       if (bnext == bs0) {
         // We fall into next block; remove the Goto
-        block->_nodes.pop();
+        block->pop_node();
       }
 
     } else if(block->_num_succs == 2) { // Block ends in a If?
@@ -759,9 +759,9 @@
       //       be projections (in any order), the 3rd last node must be
       //       the IfNode (we have excluded other 2-way exits such as
       //       CatchNodes already).
-      MachNode* iff   = block->_nodes[block->_nodes.size() - 3]->as_Mach();
-      ProjNode* proj0 = block->_nodes[block->_nodes.size() - 2]->as_Proj();
-      ProjNode* proj1 = block->_nodes[block->_nodes.size() - 1]->as_Proj();
+      MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
+      ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
+      ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();
 
       // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
       assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
@@ -833,8 +833,8 @@
         iff->as_MachIf()->negate();
       }
 
-      block->_nodes.pop();          // Remove IfFalse & IfTrue projections
-      block->_nodes.pop();
+      block->pop_node();          // Remove IfFalse & IfTrue projections
+      block->pop_node();
 
     } else {
       // Multi-exit block, e.g. a switch statement
@@ -895,13 +895,13 @@
   // Verify sane CFG
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    uint cnt = block->_nodes.size();
+    uint cnt = block->number_of_nodes();
     uint j;
     for (j = 0; j < cnt; j++)  {
-      Node *n = block->_nodes[j];
+      Node *n = block->get_node(j);
       assert(get_block_for_node(n) == block, "");
       if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
-        assert(j == 1 || block->_nodes[j-1]->is_Phi(), "CreateEx must be first instruction in block");
+        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
       }
       for (uint k = 0; k < n->req(); k++) {
         Node *def = n->in(k);
@@ -930,14 +930,14 @@
     }
 
     j = block->end_idx();
-    Node* bp = (Node*)block->_nodes[block->_nodes.size() - 1]->is_block_proj();
+    Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
     assert(bp, "last instruction must be a block proj");
-    assert(bp == block->_nodes[j], "wrong number of successors for this block");
+    assert(bp == block->get_node(j), "wrong number of successors for this block");
     if (bp->is_Catch()) {
-      while (block->_nodes[--j]->is_MachProj()) {
+      while (block->get_node(--j)->is_MachProj()) {
         ;
       }
-      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
     } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
       assert(block->_num_succs == 2, "Conditional branch must have two targets");
     }
@@ -1440,9 +1440,9 @@
           Block *bnext = next(b);
           Block *bs0 = b->non_connector_successor(0);
 
-          MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
-          ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
-          ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
+          MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
+          ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
+          ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
 
           if (bnext == bs0) {
             // Fall-thru case in succs[0], should be in succs[1]
@@ -1454,8 +1454,8 @@
             b->_succs.map( 1, tbs0 );
 
             // Flip projections to match targets
-            b->_nodes.map(b->_nodes.size()-2, proj1);
-            b->_nodes.map(b->_nodes.size()-1, proj0);
+            b->map_node(proj1, b->number_of_nodes() - 2);
+            b->map_node(proj0, b->number_of_nodes() - 1);
           }
         }
       }
--- a/hotspot/src/share/vm/opto/block.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/block.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -105,15 +105,53 @@
 // any optimization pass.  They are created late in the game.
 class Block : public CFGElement {
   friend class VMStructs;
- public:
+
+private:
   // Nodes in this block, in order
   Node_List _nodes;
 
+public:
+
+  // Get the node at index 'at_index', if 'at_index' is out of bounds return NULL
+  Node* get_node(uint at_index) const {
+    return _nodes[at_index];
+  }
+
+  // Get the number of nodes in this block
+  uint number_of_nodes() const {
+    return _nodes.size();
+  }
+
+  // Map a node 'node' to index 'to_index' in the block, if the index is out of bounds the size of the node list is increased
+  void map_node(Node* node, uint to_index) {
+    _nodes.map(to_index, node);
+  }
+
+  // Insert a node 'node' at index 'at_index', moving all nodes that are on a higher index one step, if 'at_index' is out of bounds we crash
+  void insert_node(Node* node, uint at_index) {
+    _nodes.insert(at_index, node);
+  }
+
+  // Remove a node at index 'at_index'
+  void remove_node(uint at_index) {
+    _nodes.remove(at_index);
+  }
+
+  // Push a node 'node' onto the node list
+  void push_node(Node* node) {
+    _nodes.push(node);
+  }
+
+  // Pop the last node off the node list
+  Node* pop_node() {
+    return _nodes.pop();
+  }
+
   // Basic blocks have a Node which defines Control for all Nodes pinned in
   // this block.  This Node is a RegionNode.  Exception-causing Nodes
   // (division, subroutines) and Phi functions are always pinned.  Later,
   // every Node will get pinned to some block.
-  Node *head() const { return _nodes[0]; }
+  Node *head() const { return get_node(0); }
 
   // CAUTION: num_preds() is ONE based, so that predecessor numbers match
   // input edges to Regions and Phis.
@@ -274,29 +312,12 @@
 
   // Add an instruction to an existing block.  It must go after the head
   // instruction and before the end instruction.
-  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
+  void add_inst( Node *n ) { insert_node(n, end_idx()); }
   // Find node in block
   uint find_node( const Node *n ) const;
   // Find and remove n from block list
   void find_remove( const Node *n );
 
-  // helper function that adds caller save registers to MachProjNode
-  void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
-  // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
-
-  // Perform basic-block local scheduling
-  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
-  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
-  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
-  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
-  // Cleanup if any code lands between a Call and his Catch
-  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
-  // Detect implicit-null-check opportunities.  Basically, find NULL checks
-  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
-  // I can generate a memory op if there is not one nearby.
-  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
-
   // Return the empty status of a block
   enum { not_empty, empty_with_goto, completely_empty };
   int is_Empty() const;
@@ -328,10 +349,6 @@
   // Examine block's code shape to predict if it is not commonly executed.
   bool has_uncommon_code() const;
 
-  // Use frequency calculations and code shape to predict if the block
-  // is uncommon.
-  bool is_uncommon(PhaseCFG* cfg) const;
-
 #ifndef PRODUCT
   // Debugging print of basic block
   void dump_bidx(const Block* orig, outputStream* st = tty) const;
@@ -414,6 +431,27 @@
   // to late. Helper for schedule_late.
   Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
 
+  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
+  void set_next_call(Block* block, Node* n, VectorSet& next_call);
+  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);
+
+  // Perform basic-block local scheduling
+  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);
+
+  // Schedule a call next in the block
+  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);
+
+  // Cleanup if any code lands between a Call and his Catch
+  void call_catch_cleanup(Block* block);
+
+  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
+  void  catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);
+
+  // Detect implicit-null-check opportunities.  Basically, find NULL checks
+  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
+  // I can generate a memory op if there is not one nearby.
+  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
+
   // Perform a Depth First Search (DFS).
   // Setup 'vertex' as DFS to vertex mapping.
   // Setup 'semi' as vertex to DFS mapping.
@@ -530,6 +568,10 @@
     return (_node_to_block_mapping.lookup(node->_idx) != NULL);
   }
 
+  // Use frequency calculations and code shape to predict if the block
+  // is uncommon.
+  bool is_uncommon(const Block* block);
+
 #ifdef ASSERT
   Unique_Node_List _raw_oops;
 #endif
@@ -550,7 +592,7 @@
 
   // Insert a node into a block at index and map the node to the block
   void insert(Block *b, uint idx, Node *n) {
-    b->_nodes.insert( idx, n );
+    b->insert_node(n , idx);
     map_node_to_block(n, b);
   }
 
--- a/hotspot/src/share/vm/opto/buildOopMap.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/buildOopMap.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -121,8 +121,8 @@
 // Given reaching-defs for this block start, compute it for this block end
 void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {
 
-  for( uint i=0; i<_b->_nodes.size(); i++ ) {
-    Node *n = _b->_nodes[i];
+  for( uint i=0; i<_b->number_of_nodes(); i++ ) {
+    Node *n = _b->get_node(i);
 
     if( n->jvms() ) {           // Build an OopMap here?
       JVMState *jvms = n->jvms();
@@ -447,8 +447,8 @@
       }
 
       // Now walk tmp_live up the block backwards, computing live
-      for( int k=b->_nodes.size()-1; k>=0; k-- ) {
-        Node *n = b->_nodes[k];
+      for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
+        Node *n = b->get_node(k);
         // KILL def'd bits
         int first = regalloc->get_reg_first(n);
         int second = regalloc->get_reg_second(n);
@@ -544,12 +544,12 @@
     for (i = 1; i < cfg->number_of_blocks(); i++) {
       Block* block = cfg->get_block(i);
       uint j;
-      for (j = 1; j < block->_nodes.size(); j++) {
-        if (block->_nodes[j]->jvms() && (*safehash)[block->_nodes[j]] == NULL) {
+      for (j = 1; j < block->number_of_nodes(); j++) {
+        if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
            break;
         }
       }
-      if (j < block->_nodes.size()) {
+      if (j < block->number_of_nodes()) {
         break;
       }
     }
--- a/hotspot/src/share/vm/opto/callnode.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -458,7 +458,7 @@
       st->print("={");
       uint nf = spobj->n_fields();
       if (nf > 0) {
-        uint first_ind = spobj->first_index();
+        uint first_ind = spobj->first_index(mcall->jvms());
         Node* fld_node = mcall->in(first_ind);
         ciField* cifield;
         if (iklass != NULL) {
@@ -1063,7 +1063,6 @@
   int scloff = jvms->scloff();
   int endoff = jvms->endoff();
   assert(endoff == (int)req(), "no other states or debug info after me");
-  assert(jvms->scl_size() == 0, "parsed code should not have scalar objects");
   Node* top = Compile::current()->top();
   for (uint i = 0; i < grow_by; i++) {
     ins_req(monoff, top);
@@ -1079,32 +1078,31 @@
   const int MonitorEdges = 2;
   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   assert(req() == jvms()->endoff(), "correct sizing");
-  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   int nextmon = jvms()->scloff();
   if (GenerateSynchronizationCode) {
-    add_req(lock->box_node());
-    add_req(lock->obj_node());
+    ins_req(nextmon,   lock->box_node());
+    ins_req(nextmon+1, lock->obj_node());
   } else {
     Node* top = Compile::current()->top();
-    add_req(top);
-    add_req(top);
+    ins_req(nextmon, top);
+    ins_req(nextmon, top);
   }
-  jvms()->set_scloff(nextmon+MonitorEdges);
+  jvms()->set_scloff(nextmon + MonitorEdges);
   jvms()->set_endoff(req());
 }
 
 void SafePointNode::pop_monitor() {
   // Delete last monitor from debug info
-  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   debug_only(int num_before_pop = jvms()->nof_monitors());
-  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
+  const int MonitorEdges = 2;
+  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   int scloff = jvms()->scloff();
   int endoff = jvms()->endoff();
   int new_scloff = scloff - MonitorEdges;
   int new_endoff = endoff - MonitorEdges;
   jvms()->set_scloff(new_scloff);
   jvms()->set_endoff(new_endoff);
-  while (scloff > new_scloff)  del_req(--scloff);
+  while (scloff > new_scloff)  del_req_ordered(--scloff);
   assert(jvms()->nof_monitors() == num_before_pop-1, "");
 }
 
@@ -1169,13 +1167,12 @@
 }
 
 SafePointScalarObjectNode*
-SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
+SafePointScalarObjectNode::clone(Dict* sosn_map) const {
   void* cached = (*sosn_map)[(void*)this];
   if (cached != NULL) {
     return (SafePointScalarObjectNode*)cached;
   }
   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
-  res->_first_index += jvms_adj;
   sosn_map->Insert((void*)this, (void*)res);
   return res;
 }
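The push_monitor/pop_monitor edits above rely on the JVMState debug-info layout in which scalar-replaced object entries sit between scloff and endoff. A hedged sketch of that layout and of the insert-at-scloff idiom (variable names are illustrative; the calls are the ones used above):

// Sketch only.  Debug-info edge layout on a SafePointNode, per the offsets used above:
//
//   [ locals / stack | monitors ............ | scalar objects | ]
//                    monoff                  scloff           endoff
//
// so new monitor edges must be inserted at scloff, not appended at endoff.
static void sketch_add_monitor(SafePointNode* sfpt, Node* box, Node* obj) {
  JVMState* jvms = sfpt->jvms();
  const int MonitorEdges = 2;                 // box + obj, as asserted in push_monitor
  int nextmon = jvms->scloff();               // monitors end where scalar objects begin
  sfpt->ins_req(nextmon,     box);            // add_req() here would land the edges
  sfpt->ins_req(nextmon + 1, obj);            // inside the scalar-object section
  jvms->set_scloff(nextmon + MonitorEdges);   // shift the scalar section right
  jvms->set_endoff(sfpt->req());              // endoff tracks the new input count
}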
--- a/hotspot/src/share/vm/opto/callnode.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -449,14 +449,17 @@
 // at a safepoint.
 
 class SafePointScalarObjectNode: public TypeNode {
-  uint _first_index; // First input edge index of a SafePoint node where
+  uint _first_index; // First input edge relative index of a SafePoint node where
                      // states of the scalarized object fields are collected.
+                     // It is relative to the last (youngest) jvms->_scloff.
   uint _n_fields;    // Number of non-static fields of the scalarized object.
   DEBUG_ONLY(AllocateNode* _alloc;)
 
   virtual uint hash() const ; // { return NO_HASH; }
   virtual uint cmp( const Node &n ) const;
 
+  uint first_index() const { return _first_index; }
+
 public:
   SafePointScalarObjectNode(const TypeOopPtr* tp,
 #ifdef ASSERT
@@ -469,7 +472,10 @@
   virtual const RegMask &out_RegMask() const;
   virtual uint           match_edge(uint idx) const;
 
-  uint first_index() const { return _first_index; }
+  uint first_index(JVMState* jvms) const {
+    assert(jvms != NULL, "missed JVMS");
+    return jvms->scloff() + _first_index;
+  }
   uint n_fields()    const { return _n_fields; }
 
 #ifdef ASSERT
@@ -485,7 +491,7 @@
   // corresponds appropriately to "this" in "new_call".  Assumes that
   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
-  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
+  SafePointScalarObjectNode* clone(Dict* sosn_map) const;
 
 #ifndef PRODUCT
   virtual void              dump_spec(outputStream *st) const;
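Since _first_index is now scloff-relative, a consumer resolves it through the JVMS of the call it is attached to. A small sketch restating the accessor above with hypothetical variable names; as in the dump code in callnode.cpp, the field values are inputs of the call:

// Sketch only: walk the field values of one scalar-replaced object.
static void visit_scalarized_fields(MachCallNode* mcall, SafePointScalarObjectNode* spobj) {
  uint first_ind = spobj->first_index(mcall->jvms());  // == jvms->scloff() + relative index
  for (uint f = 0; f < spobj->n_fields(); f++) {
    Node* fld = mcall->in(first_ind + f);              // field value is an input of the call
    (void)fld;                                         // a real caller would inspect fld here
  }
}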
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -301,7 +301,7 @@
       // Copy kill projections after the cloned node
       Node* kills = proj->clone();
       kills->set_req(0, copy);
-      b->_nodes.insert(idx++, kills);
+      b->insert_node(kills, idx++);
       _cfg.map_node_to_block(kills, b);
       new_lrg(kills, max_lrg_id++);
     }
@@ -682,11 +682,11 @@
   uint lr_counter = 1;
   for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
     Block* block = _cfg.get_block(i);
-    uint cnt = block->_nodes.size();
+    uint cnt = block->number_of_nodes();
 
     // Handle all the normal Nodes in the block
     for( uint j = 0; j < cnt; j++ ) {
-      Node *n = block->_nodes[j];
+      Node *n = block->get_node(j);
       // Pre-color to the zero live range, or pick virtual register
       const RegMask &rm = n->out_RegMask();
       _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
@@ -710,8 +710,8 @@
     Block* block = _cfg.get_block(i);
 
     // For all instructions
-    for (uint j = 1; j < block->_nodes.size(); j++) {
-      Node* n = block->_nodes[j];
+    for (uint j = 1; j < block->number_of_nodes(); j++) {
+      Node* n = block->get_node(j);
       uint input_edge_start =1; // Skip control most nodes
       if (n->is_Mach()) {
         input_edge_start = n->as_Mach()->oper_input_base();
@@ -1604,7 +1604,7 @@
     // For all instructions in block
     uint last_inst = block->end_idx();
     for (uint j = 1; j <= last_inst; j++) {
-      Node* n = block->_nodes[j];
+      Node* n = block->get_node(j);
 
       // Dead instruction???
       assert( n->outcnt() != 0 ||// Nothing dead after post alloc
@@ -1641,7 +1641,7 @@
             assert( cisc->oper_input_base() == 2, "Only adding one edge");
             cisc->ins_req(1,src);         // Requires a memory edge
           }
-          block->_nodes.map(j,cisc);          // Insert into basic block
+          block->map_node(cisc, j);          // Insert into basic block
           n->subsume_by(cisc, C); // Correct graph
           //
           ++_used_cisc_instructions;
@@ -1698,7 +1698,7 @@
       // (where top() node is placed).
       base->init_req(0, _cfg.get_root_node());
       Block *startb = _cfg.get_block_for_node(C->top());
-      startb->_nodes.insert(startb->find_node(C->top()), base );
+      startb->insert_node(base, startb->find_node(C->top()));
       _cfg.map_node_to_block(base, startb);
       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
     }
@@ -1743,9 +1743,9 @@
   // Search the current block for an existing base-Phi
   Block *b = _cfg.get_block_for_node(derived);
   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
-    Node *phi = b->_nodes[i];
+    Node *phi = b->get_node(i);
     if( !phi->is_Phi() ) {      // Found end of Phis with no match?
-      b->_nodes.insert( i, base ); // Must insert created Phi here as base
+      b->insert_node(base, i); // Must insert created Phi here as base
       _cfg.map_node_to_block(base, b);
       new_lrg(base,maxlrg++);
       break;
@@ -1786,7 +1786,7 @@
     IndexSet liveout(_live->live(block));
 
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
       // like to see in the same register.  Compare uses the loop-phi and so
@@ -1979,8 +1979,8 @@
   b->dump_head(&_cfg);
 
   // For all instructions
-  for( uint j = 0; j < b->_nodes.size(); j++ )
-    dump(b->_nodes[j]);
+  for( uint j = 0; j < b->number_of_nodes(); j++ )
+    dump(b->get_node(j));
   // Print live-out info at end of block
   if( _live ) {
     tty->print("Liveout: ");
@@ -2271,8 +2271,8 @@
     int dump_once = 0;
 
     // For all instructions
-    for( uint j = 0; j < block->_nodes.size(); j++ ) {
-      Node *n = block->_nodes[j];
+    for( uint j = 0; j < block->number_of_nodes(); j++ ) {
+      Node *n = block->get_node(j);
       if (_lrg_map.find_const(n) == lidx) {
         if (!dump_once++) {
           tty->cr();
--- a/hotspot/src/share/vm/opto/coalesce.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/coalesce.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -54,9 +54,9 @@
     for( j=0; j<b->_num_succs; j++ )
       tty->print("B%d ",b->_succs[j]->_pre_order);
     tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
-    uint cnt = b->_nodes.size();
+    uint cnt = b->number_of_nodes();
     for( j=0; j<cnt; j++ ) {
-      Node *n = b->_nodes[j];
+      Node *n = b->get_node(j);
       dump( n );
       tty->print("\t%s\t",n->Name());
 
@@ -152,7 +152,7 @@
   // after the last use.  Last use is really first-use on a backwards scan.
   uint i = b->end_idx()-1;
   while(1) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if (n->_idx < _unique) {
@@ -174,7 +174,7 @@
   // the last kill.  Thus it is the first kill on a backwards scan.
   i = b->end_idx()-1;
   while (1) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if (n->_idx < _unique) {
@@ -200,13 +200,13 @@
     tmp ->set_req(idx,copy->in(idx));
     copy->set_req(idx,tmp);
     // Save source in temp early, before source is killed
-    b->_nodes.insert(kill_src_idx,tmp);
+    b->insert_node(tmp, kill_src_idx);
     _phc._cfg.map_node_to_block(tmp, b);
     last_use_idx++;
   }
 
   // Insert just after last use
-  b->_nodes.insert(last_use_idx+1,copy);
+  b->insert_node(copy, last_use_idx + 1);
 }
 
 void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
@@ -237,8 +237,8 @@
     Block *b = _phc._cfg.get_block(i);
     uint cnt = b->num_preds();  // Number of inputs to the Phi
 
-    for( uint l = 1; l<b->_nodes.size(); l++ ) {
-      Node *n = b->_nodes[l];
+    for( uint l = 1; l<b->number_of_nodes(); l++ ) {
+      Node *n = b->get_node(l);
 
       // Do not use removed-copies, use copied value instead
       uint ncnt = n->req();
@@ -260,7 +260,7 @@
         if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
           n->replace_by(def);
           n->set_req(cidx,NULL);
-          b->_nodes.remove(l);
+          b->remove_node(l);
           l--;
           continue;
         }
@@ -321,13 +321,13 @@
                m->as_Mach()->rematerialize()) {
               copy = m->clone();
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert(l++, copy);
+              b->insert_node(copy, l++);
               l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
             } else {
               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
               copy = new (C) MachSpillCopyNode(m, *rm, *rm);
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert(l++, copy);
+              b->insert_node(copy, l++);
             }
             // Insert the copy in the use-def chain
             n->set_req(idx, copy);
@@ -339,7 +339,7 @@
         } // End of is two-adr
 
         // Insert a copy at a debug use for a lrg which has high frequency
-        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
+        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
           // Walk the debug inputs to the node and check for lrg freq
           JVMState* jvms = n->jvms();
           uint debug_start = jvms ? jvms->debug_start() : 999999;
@@ -376,7 +376,7 @@
               // Insert the copy in the use-def chain
               n->set_req(inpidx, copy );
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert( l++, copy );
+              b->insert_node(copy, l++);
               // Extend ("register allocate") the names array for the copy.
               uint max_lrg_id = _phc._lrg_map.max_lrg_id();
               _phc.new_lrg(copy, max_lrg_id);
@@ -431,8 +431,8 @@
     }
 
     // Visit all the Phis in successor block
-    for( uint k = 1; k<bs->_nodes.size(); k++ ) {
-      Node *n = bs->_nodes[k];
+    for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
+      Node *n = bs->get_node(k);
       if( !n->is_Phi() ) break;
       combine_these_two( n, n->in(j) );
     }
@@ -442,7 +442,7 @@
   // Check _this_ block for 2-address instructions and copies.
   uint cnt = b->end_idx();
   for( i = 1; i<cnt; i++ ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     uint idx;
     // 2-address instructions have a virtual Copy matching their input
     // to their output
@@ -490,10 +490,10 @@
   dst_copy->set_req( didx, src_def );
   // Add copy to free list
   // _phc.free_spillcopy(b->_nodes[bindex]);
-  assert( b->_nodes[bindex] == dst_copy, "" );
+  assert( b->get_node(bindex) == dst_copy, "" );
   dst_copy->replace_by( dst_copy->in(didx) );
   dst_copy->set_req( didx, NULL);
-  b->_nodes.remove(bindex);
+  b->remove_node(bindex);
   if( bindex < b->_ihrp_index ) b->_ihrp_index--;
   if( bindex < b->_fhrp_index ) b->_fhrp_index--;
 
@@ -523,8 +523,8 @@
       bindex2 = b2->end_idx()-1;
     }
     // Get prior instruction
-    assert(bindex2 < b2->_nodes.size(), "index out of bounds");
-    Node *x = b2->_nodes[bindex2];
+    assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
+    Node *x = b2->get_node(bindex2);
     if( x == prev_copy ) {      // Previous copy in copy chain?
       if( prev_copy == src_copy)// Found end of chain and all interferences
         break;                  // So break out of loop
@@ -769,14 +769,14 @@
 // Conservative (but pessimistic) copy coalescing of a single block
 void PhaseConservativeCoalesce::coalesce( Block *b ) {
   // Bail out on infrequent blocks
-  if (b->is_uncommon(&_phc._cfg)) {
+  if (_phc._cfg.is_uncommon(b)) {
     return;
   }
   // Check this block for copies.
   for( uint i = 1; i<b->end_idx(); i++ ) {
     // Check for actual copies on inputs.  Coalesce a copy into its
     // input if use and copy's input are compatible.
-    Node *copy1 = b->_nodes[i];
+    Node *copy1 = b->get_node(i);
     uint idx1 = copy1->is_Copy();
     if( !idx1 ) continue;       // Not a copy
 
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -2258,7 +2258,7 @@
     if (block->is_connector() && !Verbose) {
       continue;
     }
-    n = block->_nodes[0];
+    n = block->head();
     if (pcs && n->_idx < pc_limit) {
       tty->print("%3.3x   ", pcs[n->_idx]);
     } else {
@@ -2273,12 +2273,12 @@
 
     // For all instructions
     Node *delay = NULL;
-    for (uint j = 0; j < block->_nodes.size(); j++) {
+    for (uint j = 0; j < block->number_of_nodes(); j++) {
       if (VMThread::should_terminate()) {
         cut_short = true;
         break;
       }
-      n = block->_nodes[j];
+      n = block->get_node(j);
       if (valid_bundle_info(n)) {
         Bundle* bundle = node_bundling(n);
         if (bundle->used_in_unconditional_delay()) {
--- a/hotspot/src/share/vm/opto/domgraph.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/domgraph.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -211,21 +211,21 @@
 uint Block_Stack::most_frequent_successor( Block *b ) {
   uint freq_idx = 0;
   int eidx = b->end_idx();
-  Node *n = b->_nodes[eidx];
+  Node *n = b->get_node(eidx);
   int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
   switch( op ) {
   case Op_CountedLoopEnd:
   case Op_If: {               // Split frequency amongst children
     float prob = n->as_MachIf()->_prob;
     // Is succ[0] the TRUE branch or the FALSE branch?
-    if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse )
+    if( b->get_node(eidx+1)->Opcode() == Op_IfFalse )
       prob = 1.0f - prob;
     freq_idx = prob < PROB_FAIR;      // freq=1 for succ[0] < 0.5 prob
     break;
   }
   case Op_Catch:                // Split frequency amongst children
     for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ )
-      if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index )
+      if( b->get_node(eidx+1+freq_idx)->as_CatchProj()->_con == CatchProjNode::fall_through_index )
         break;
     // Handle case of no fall-thru (e.g., check-cast MUST throw an exception)
     if( freq_idx == b->_num_succs ) freq_idx = 0;
--- a/hotspot/src/share/vm/opto/gcm.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -102,12 +102,12 @@
     uint j = 0;
     if (pb->_num_succs != 1) {  // More then 1 successor?
       // Search for successor
-      uint max = pb->_nodes.size();
+      uint max = pb->number_of_nodes();
       assert( max > 1, "" );
       uint start = max - pb->_num_succs;
       // Find which output path belongs to projection
       for (j = start; j < max; j++) {
-        if( pb->_nodes[j] == in0 )
+        if( pb->get_node(j) == in0 )
           break;
       }
       assert( j < max, "must find" );
@@ -1027,8 +1027,8 @@
   Block* least       = LCA;
   double least_freq  = least->_freq;
   uint target        = get_latency_for_node(self);
-  uint start_latency = get_latency_for_node(LCA->_nodes[0]);
-  uint end_latency   = get_latency_for_node(LCA->_nodes[LCA->end_idx()]);
+  uint start_latency = get_latency_for_node(LCA->head());
+  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
   bool in_latency    = (target <= start_latency);
   const Block* root_block = get_block_for_node(_root);
 
@@ -1049,9 +1049,9 @@
     self->dump();
     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
       LCA->_pre_order,
-      LCA->_nodes[0]->_idx,
+      LCA->head()->_idx,
       start_latency,
-      LCA->_nodes[LCA->end_idx()]->_idx,
+      LCA->get_node(LCA->end_idx())->_idx,
       end_latency,
       least_freq);
   }
@@ -1074,14 +1074,14 @@
     if (mach && LCA == root_block)
       break;
 
-    uint start_lat = get_latency_for_node(LCA->_nodes[0]);
+    uint start_lat = get_latency_for_node(LCA->head());
     uint end_idx   = LCA->end_idx();
-    uint end_lat   = get_latency_for_node(LCA->_nodes[end_idx]);
+    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
     double LCA_freq = LCA->_freq;
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
-        LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
+        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
     }
 #endif
     cand_cnt++;
@@ -1342,7 +1342,7 @@
       Node* proj = _matcher._null_check_tests[i];
       Node* val  = _matcher._null_check_tests[i + 1];
       Block* block = get_block_for_node(proj);
-      block->implicit_null_check(this, proj, val, allowed_reasons);
+      implicit_null_check(block, proj, val, allowed_reasons);
       // The implicit_null_check will only perform the transformation
       // if the null branch is truly uncommon, *and* it leads to an
       // uncommon trap.  Combined with the too_many_traps guards
@@ -1363,7 +1363,7 @@
   visited.Clear();
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    if (!block->schedule_local(this, _matcher, ready_cnt, visited)) {
+    if (!schedule_local(block, ready_cnt, visited)) {
       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
         C->record_method_not_compilable("local schedule failed");
       }
@@ -1375,7 +1375,7 @@
   // clone the instructions on all paths below the Catch.
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    block->call_catch_cleanup(this, C);
+    call_catch_cleanup(block);
   }
 
 #ifndef PRODUCT
@@ -1726,7 +1726,7 @@
 // Determine the probability of reaching successor 'i' from the receiver block.
 float Block::succ_prob(uint i) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
@@ -1761,7 +1761,7 @@
     float prob  = n->as_MachIf()->_prob;
     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
     // If succ[i] is the FALSE branch, invert path info
-    if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
+    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
       return 1.0f - prob; // not taken
     } else {
       return prob; // taken
@@ -1773,7 +1773,7 @@
     return 1.0f/_num_succs;
 
   case Op_Catch: {
-    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
     if (ci->_con == CatchProjNode::fall_through_index) {
       // Fall-thru path gets the lion's share.
       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
@@ -1810,7 +1810,7 @@
 // Return the number of fall-through candidates for a block
 int Block::num_fall_throughs() {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
@@ -1834,7 +1834,7 @@
 
   case Op_Catch: {
     for (uint i = 0; i < _num_succs; i++) {
-      const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
       if (ci->_con == CatchProjNode::fall_through_index) {
         return 1;
       }
@@ -1862,14 +1862,14 @@
 // Return true if a specific successor could be fall-through target.
 bool Block::succ_fall_through(uint i) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
     if (n->is_MachNullCheck()) {
       // In theory, either side can fall-thru, for simplicity sake,
       // let's say only the false branch can now.
-      return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
+      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
     }
     op = n->as_Mach()->ideal_Opcode();
   }
@@ -1883,7 +1883,7 @@
     return true;
 
   case Op_Catch: {
-    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
     return ci->_con == CatchProjNode::fall_through_index;
   }
 
@@ -1907,7 +1907,7 @@
 // Update the probability of a two-branch to be uncommon
 void Block::update_uncommon_branch(Block* ub) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->as_Mach()->ideal_Opcode();
 
@@ -1923,7 +1923,7 @@
 
   // If ub is the true path, make the proability small, else
   // ub is the false path, and make the probability large
-  bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
+  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
 
   // Get existing probability
   float p = n->as_MachIf()->_prob;
--- a/hotspot/src/share/vm/opto/generateOptoStub.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/generateOptoStub.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -61,6 +61,7 @@
   JVMState* jvms = new (C) JVMState(0);
   jvms->set_bci(InvocationEntryBci);
   jvms->set_monoff(max_map);
+  jvms->set_scloff(max_map);
   jvms->set_endoff(max_map);
   {
     SafePointNode *map = new (C) SafePointNode( max_map, jvms );
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1501,6 +1501,25 @@
   }
 }
 
+bool GraphKit::can_move_pre_barrier() const {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  switch (bs->kind()) {
+    case BarrierSet::G1SATBCT:
+    case BarrierSet::G1SATBCTLogging:
+      return true; // Can move it if no safepoint
+
+    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableExtension:
+    case BarrierSet::ModRef:
+      return true; // There is no pre-barrier
+
+    case BarrierSet::Other:
+    default      :
+      ShouldNotReachHere();
+  }
+  return false;
+}
+
 void GraphKit::post_barrier(Node* ctl,
                             Node* store,
                             Node* obj,
@@ -3551,6 +3570,8 @@
   } else {
     // In this case both val_type and alias_idx are unused.
     assert(pre_val != NULL, "must be loaded already");
+    // Nothing to be done if pre_val is null.
+    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
     assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
   }
   assert(bt == T_OBJECT, "or we shouldn't be here");
@@ -3595,7 +3616,7 @@
     if (do_load) {
       // load original value
       // alias_idx correct??
-      pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
+      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
     }
 
     // if (pre_val != NULL)
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -695,6 +695,10 @@
   void write_barrier_post(Node *store, Node* obj,
                           Node* adr,  uint adr_idx, Node* val, bool use_precise);
 
+  // Allow reordering of pre-barrier with oop store and/or post-barrier.
+  // Used for load_store operations which load the old value.
+  bool can_move_pre_barrier() const;
+
   // G1 pre/post barriers
   void g1_write_barrier_pre(bool do_load,
                             Node* obj,
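A hedged sketch of how a load_store expansion might consult the new predicate; the surrounding helpers are hypothetical and only can_move_pre_barrier() comes from this changeset:

// Sketch only.  With G1 the pre-barrier needs just the old value, so it can be
// emitted after an atomic load/store (no safepoint in between); card-table and
// ModRef schemes have no pre-barrier, so reordering is trivially allowed.
static void expand_load_store(GraphKit* kit, Node* obj, Node* adr, uint alias_idx) {
  const bool defer_pre_barrier = kit->can_move_pre_barrier();
  if (!defer_pre_barrier) {
    // Would have to load the old value and emit the pre-barrier up front here
    // (hypothetical helpers, elided).
  }
  // ... emit the atomic operation itself; it returns the old value ...
  if (defer_pre_barrier) {
    // Emit the pre-barrier now with do_load == false, feeding it the value the
    // operation returned; the NULL_PTR early-out added in graphKit.cpp covers
    // the case where that value is statically null.
  }
  (void)obj; (void)adr; (void)alias_idx;
}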
--- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -639,8 +639,8 @@
     // reachable but are in the CFG so add them here.
     for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
       Block* block = C->cfg()->get_block(i);
-      for (uint s = 0; s < block->_nodes.size(); s++) {
-        nodeStack.push(block->_nodes[s]);
+      for (uint s = 0; s < block->number_of_nodes(); s++) {
+        nodeStack.push(block->get_node(s));
       }
     }
   }
@@ -713,9 +713,9 @@
       tail(SUCCESSORS_ELEMENT);
 
       head(NODES_ELEMENT);
-      for (uint s = 0; s < block->_nodes.size(); s++) {
+      for (uint s = 0; s < block->number_of_nodes(); s++) {
         begin_elem(NODE_ELEMENT);
-        print_attr(NODE_ID_PROPERTY, get_node_id(block->_nodes[s]));
+        print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
         end_elem();
       }
       tail(NODES_ELEMENT);
--- a/hotspot/src/share/vm/opto/ifg.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/ifg.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -319,7 +319,7 @@
     // value is then removed from the live-ness set and it's inputs are
     // added to the live-ness set.
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Get value being defined
       uint r = _lrg_map.live_range_id(n);
@@ -456,7 +456,7 @@
     // Compute first nonphi node index
     uint first_inst;
     for (first_inst = 1; first_inst < last_inst; first_inst++) {
-      if (!block->_nodes[first_inst]->is_Phi()) {
+      if (!block->get_node(first_inst)->is_Phi()) {
         break;
       }
     }
@@ -464,15 +464,15 @@
     // Spills could be inserted before CreateEx node which should be
     // first instruction in block after Phis. Move CreateEx up.
     for (uint insidx = first_inst; insidx < last_inst; insidx++) {
-      Node *ex = block->_nodes[insidx];
+      Node *ex = block->get_node(insidx);
       if (ex->is_SpillCopy()) {
         continue;
       }
       if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
         // If the CreateEx isn't above all the MachSpillCopies
         // then move it to the top.
-        block->_nodes.remove(insidx);
-        block->_nodes.insert(first_inst, ex);
+        block->remove_node(insidx);
+        block->insert_node(ex, first_inst);
       }
       // Stop once a CreateEx or any other node is found
       break;
@@ -523,7 +523,7 @@
     // to the live-ness set.
     uint j;
     for (j = last_inst + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Get value being defined
       uint r = _lrg_map.live_range_id(n);
@@ -541,7 +541,7 @@
           if( !n->is_Proj() ||
               // Could also be a flags-projection of a dead ADD or such.
               (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
-            block->_nodes.remove(j - 1);
+            block->remove_node(j - 1);
             if (lrgs(r)._def == n) {
               lrgs(r)._def = 0;
             }
@@ -605,7 +605,7 @@
             // (j - 1) is index for current instruction 'n'
             Node *m = n;
             for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
-              m = block->_nodes[i];
+              m = block->get_node(i);
             }
             if (m == single_use) {
               lrgs(r)._area = 0.0;
@@ -772,20 +772,20 @@
 
     // Compute high pressure indice; avoid landing in the middle of projnodes
     j = hrp_index[0];
-    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
-      Node* cur = block->_nodes[j];
+    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+      Node* cur = block->get_node(j);
       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
         j--;
-        cur = block->_nodes[j];
+        cur = block->get_node(j);
       }
     }
     block->_ihrp_index = j;
     j = hrp_index[1];
-    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
-      Node* cur = block->_nodes[j];
+    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+      Node* cur = block->get_node(j);
       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
         j--;
-        cur = block->_nodes[j];
+        cur = block->get_node(j);
       }
     }
     block->_fhrp_index = j;
--- a/hotspot/src/share/vm/opto/lcm.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -58,14 +58,14 @@
 // The proj is the control projection for the not-null case.
 // The val is the pointer being checked for nullness or
 // decodeHeapOop_not_null node if it did not fold into address.
-void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
+void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
   // Assume if null check need for 0 offset then always needed
   // Intel solaris doesn't support any null checks yet and no
   // mechanism exists (yet) to set the switches at an os_cpu level
   if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
 
   // Make sure the ptr-is-null path appears to be uncommon!
-  float f = end()->as_MachIf()->_prob;
+  float f = block->end()->as_MachIf()->_prob;
   if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
   if( f > PROB_UNLIKELY_MAG(4) ) return;
 
@@ -75,13 +75,13 @@
   // Get the successor block for if the test ptr is non-null
   Block* not_null_block;  // this one goes with the proj
   Block* null_block;
-  if (_nodes[_nodes.size()-1] == proj) {
-    null_block     = _succs[0];
-    not_null_block = _succs[1];
+  if (block->get_node(block->number_of_nodes()-1) == proj) {
+    null_block     = block->_succs[0];
+    not_null_block = block->_succs[1];
   } else {
-    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
-    not_null_block = _succs[0];
-    null_block     = _succs[1];
+    assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
+    not_null_block = block->_succs[0];
+    null_block     = block->_succs[1];
   }
   while (null_block->is_Empty() == Block::empty_with_goto) {
     null_block     = null_block->_succs[0];
@@ -93,8 +93,8 @@
   // detect failure of this optimization, as in 6366351.)
   {
     bool found_trap = false;
-    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
-      Node* nn = null_block->_nodes[i1];
+    for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
+      Node* nn = null_block->get_node(i1);
       if (nn->is_MachCall() &&
           nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
         const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
@@ -237,20 +237,20 @@
     }
 
     // Check ctrl input to see if the null-check dominates the memory op
-    Block *cb = cfg->get_block_for_node(mach);
+    Block *cb = get_block_for_node(mach);
     cb = cb->_idom;             // Always hoist at least 1 block
     if( !was_store ) {          // Stores can be hoisted only one block
-      while( cb->_dom_depth > (_dom_depth + 1))
+      while( cb->_dom_depth > (block->_dom_depth + 1))
         cb = cb->_idom;         // Hoist loads as far as we want
       // The non-null-block should dominate the memory op, too. Live
       // range spilling will insert a spill in the non-null-block if it is
       // needs to spill the memory op for an implicit null check.
-      if (cb->_dom_depth == (_dom_depth + 1)) {
+      if (cb->_dom_depth == (block->_dom_depth + 1)) {
         if (cb != not_null_block) continue;
         cb = cb->_idom;
       }
     }
-    if( cb != this ) continue;
+    if( cb != block ) continue;
 
     // Found a memory user; see if it can be hoisted to check-block
     uint vidx = 0;              // Capture index of value into memop
@@ -262,8 +262,8 @@
         if( is_decoden ) continue;
       }
       // Block of memory-op input
-      Block *inb = cfg->get_block_for_node(mach->in(j));
-      Block *b = this;          // Start from nul check
+      Block *inb = get_block_for_node(mach->in(j));
+      Block *b = block;          // Start from null check
       while( b != inb && b->_dom_depth > inb->_dom_depth )
         b = b->_idom;           // search upwards for input
       // See if input dominates null check
@@ -272,28 +272,28 @@
     }
     if( j > 0 )
       continue;
-    Block *mb = cfg->get_block_for_node(mach);
+    Block *mb = get_block_for_node(mach);
     // Hoisting stores requires more checks for the anti-dependence case.
     // Give up hoisting if we have to move the store past any load.
     if( was_store ) {
       Block *b = mb;            // Start searching here for a local load
       // mach use (faulting) trying to hoist
       // n might be blocker to hoisting
-      while( b != this ) {
+      while( b != block ) {
         uint k;
-        for( k = 1; k < b->_nodes.size(); k++ ) {
-          Node *n = b->_nodes[k];
+        for( k = 1; k < b->number_of_nodes(); k++ ) {
+          Node *n = b->get_node(k);
           if( n->needs_anti_dependence_check() &&
               n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
             break;              // Found anti-dependent load
         }
-        if( k < b->_nodes.size() )
+        if( k < b->number_of_nodes() )
           break;                // Found anti-dependent load
         // Make sure control does not do a merge (would have to check allpaths)
         if( b->num_preds() != 2 ) break;
-        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
+        b = get_block_for_node(b->pred(1)); // Move up to predecessor block
       }
-      if( b != this ) continue;
+      if( b != block ) continue;
     }
 
     // Make sure this memory op is not already being used for a NullCheck
@@ -303,7 +303,7 @@
 
     // Found a candidate!  Pick one with least dom depth - the highest
     // in the dom tree should be closest to the null check.
-    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
+    if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
       best = mach;
       bidx = vidx;
     }
@@ -319,46 +319,45 @@
 
   if( is_decoden ) {
     // Check if we need to hoist decodeHeapOop_not_null first.
-    Block *valb = cfg->get_block_for_node(val);
-    if( this != valb && this->_dom_depth < valb->_dom_depth ) {
+    Block *valb = get_block_for_node(val);
+    if( block != valb && block->_dom_depth < valb->_dom_depth ) {
       // Hoist it up to the end of the test block.
       valb->find_remove(val);
-      this->add_inst(val);
-      cfg->map_node_to_block(val, this);
+      block->add_inst(val);
+      map_node_to_block(val, block);
       // DecodeN on x86 may kill flags. Check for flag-killing projections
       // that also need to be hoisted.
       for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
         Node* n = val->fast_out(j);
         if( n->is_MachProj() ) {
-          cfg->get_block_for_node(n)->find_remove(n);
-          this->add_inst(n);
-          cfg->map_node_to_block(n, this);
+          get_block_for_node(n)->find_remove(n);
+          block->add_inst(n);
+          map_node_to_block(n, block);
         }
       }
     }
   }
   // Hoist the memory candidate up to the end of the test block.
-  Block *old_block = cfg->get_block_for_node(best);
+  Block *old_block = get_block_for_node(best);
   old_block->find_remove(best);
-  add_inst(best);
-  cfg->map_node_to_block(best, this);
+  block->add_inst(best);
+  map_node_to_block(best, block);
 
   // Move the control dependence
-  if (best->in(0) && best->in(0) == old_block->_nodes[0])
-    best->set_req(0, _nodes[0]);
+  if (best->in(0) && best->in(0) == old_block->head())
+    best->set_req(0, block->head());
 
   // Check for flag-killing projections that also need to be hoisted
   // Should be DU safe because no edge updates.
   for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
     Node* n = best->fast_out(j);
     if( n->is_MachProj() ) {
-      cfg->get_block_for_node(n)->find_remove(n);
-      add_inst(n);
-      cfg->map_node_to_block(n, this);
+      get_block_for_node(n)->find_remove(n);
+      block->add_inst(n);
+      map_node_to_block(n, block);
     }
   }
 
-  Compile *C = cfg->C;
   // proj==Op_True --> ne test; proj==Op_False --> eq test.
   // One of two graph shapes got matched:
   //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
@@ -368,10 +367,10 @@
   // We need to flip the projections to keep the same semantics.
   if( proj->Opcode() == Op_IfTrue ) {
     // Swap order of projections in basic block to swap branch targets
-    Node *tmp1 = _nodes[end_idx()+1];
-    Node *tmp2 = _nodes[end_idx()+2];
-    _nodes.map(end_idx()+1, tmp2);
-    _nodes.map(end_idx()+2, tmp1);
+    Node *tmp1 = block->get_node(block->end_idx()+1);
+    Node *tmp2 = block->get_node(block->end_idx()+2);
+    block->map_node(tmp2, block->end_idx()+1);
+    block->map_node(tmp1, block->end_idx()+2);
     Node *tmp = new (C) Node(C->top()); // Use not NULL input
     tmp1->replace_by(tmp);
     tmp2->replace_by(tmp1);
@@ -384,8 +383,8 @@
   // it as well.
   Node *old_tst = proj->in(0);
   MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
-  _nodes.map(end_idx(),nul_chk);
-  cfg->map_node_to_block(nul_chk, this);
+  block->map_node(nul_chk, block->end_idx());
+  map_node_to_block(nul_chk, block);
   // Redirect users of old_test to nul_chk
   for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
     old_tst->last_out(i2)->set_req(0, nul_chk);
@@ -393,8 +392,8 @@
   for (uint i3 = 0; i3 < old_tst->req(); i3++)
     old_tst->set_req(i3, NULL);
 
-  cfg->latency_from_uses(nul_chk);
-  cfg->latency_from_uses(best);
+  latency_from_uses(nul_chk);
+  latency_from_uses(best);
 }
 
 
@@ -408,7 +407,7 @@
 // remaining cases (most), choose the instruction with the greatest latency
 // (that is, the most number of pseudo-cycles required to the end of the
 // routine). If there is a tie, choose the instruction with the most inputs.
-Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
+Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
 
   // If only a single entry on the stack, use it
   uint cnt = worklist.size();
@@ -442,7 +441,7 @@
     }
 
     // Final call in a block must be adjacent to 'catch'
-    Node *e = end();
+    Node *e = block->end();
     if( e->is_Catch() && e->in(0)->in(0) == n )
       continue;
 
@@ -468,7 +467,7 @@
         Node* use = n->fast_out(j);
 
         // The use is a conditional branch, make them adjacent
-        if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
+        if (use->is_MachIf() && get_block_for_node(use) == block) {
           found_machif = true;
           break;
         }
@@ -501,7 +500,7 @@
       n_choice = 1;
     }
 
-    uint n_latency = cfg->get_latency_for_node(n);
+    uint n_latency = get_latency_for_node(n);
     uint n_score   = n->req();   // Many inputs get high score to break ties
 
     // Keep best latency found
@@ -529,13 +528,13 @@
 
 
 //------------------------------set_next_call----------------------------------
-void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
+void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
   if( next_call.test_set(n->_idx) ) return;
   for( uint i=0; i<n->len(); i++ ) {
     Node *m = n->in(i);
     if( !m ) continue;  // must see all nodes in block that precede call
-    if (cfg->get_block_for_node(m) == this) {
-      set_next_call(m, next_call, cfg);
+    if (get_block_for_node(m) == block) {
+      set_next_call(block, m, next_call);
     }
   }
 }
@@ -546,12 +545,12 @@
 // next subroutine call get priority - basically it moves things NOT needed
 // for the next call till after the call.  This prevents me from trying to
 // carry lots of stuff live across a call.
-void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
+void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
   // Find the next control-defining Node in this block
   Node* call = NULL;
   for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
     Node* m = this_call->fast_out(i);
-    if(cfg->get_block_for_node(m) == this && // Local-block user
+    if(get_block_for_node(m) == block && // Local-block user
         m != this_call &&       // Not self-start node
         m->is_MachCall() )
       call = m;
@@ -559,11 +558,12 @@
   }
   if (call == NULL)  return;    // No next call (e.g., block end is near)
   // Set next-call for all inputs to this call
-  set_next_call(call, next_call, cfg);
+  set_next_call(block, call, next_call);
 }
 
 //------------------------------add_call_kills-------------------------------------
-void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
+// Helper function that adds caller-save registers to the MachProjNode
+static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
   // Fill in the kill mask for the call
   for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
     if( !regs.Member(r) ) {     // Not already defined by the call
@@ -579,7 +579,7 @@
 
 
 //------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
   RegMask regs;
 
   // Schedule all the users of the call right now.  All the users are
@@ -592,18 +592,18 @@
     ready_cnt.at_put(n->_idx, n_cnt);
     assert( n_cnt == 0, "" );
     // Schedule next to call
-    _nodes.map(node_cnt++, n);
+    block->map_node(n, node_cnt++);
     // Collect defined registers
     regs.OR(n->out_RegMask());
     // Check for scheduling the next control-definer
     if( n->bottom_type() == Type::CONTROL )
       // Warm up next pile of heuristic bits
-      needed_for_next_call(n, next_call, cfg);
+      needed_for_next_call(block, n, next_call);
 
     // Children of projections are now all ready
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j); // Get user
-      if(cfg->get_block_for_node(m) != this) {
+      if(get_block_for_node(m) != block) {
         continue;
       }
       if( m->is_Phi() ) continue;
@@ -617,14 +617,14 @@
 
   // Act as if the call defines the Frame Pointer.
   // Certainly the FP is alive and well after the call.
-  regs.Insert(matcher.c_frame_pointer());
+  regs.Insert(_matcher.c_frame_pointer());
 
   // Set all registers killed and not already defined by the call.
   uint r_cnt = mcall->tf()->range()->cnt();
   int op = mcall->ideal_Opcode();
-  MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
-  cfg->map_node_to_block(proj, this);
-  _nodes.insert(node_cnt++, proj);
+  MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
+  map_node_to_block(proj, block);
+  block->insert_node(proj, node_cnt++);
 
   // Select the right register save policy.
   const char * save_policy;
@@ -633,13 +633,13 @@
     case Op_CallLeaf:
     case Op_CallLeafNoFP:
       // Calling C code so use C calling convention
-      save_policy = matcher._c_reg_save_policy;
+      save_policy = _matcher._c_reg_save_policy;
       break;
 
     case Op_CallStaticJava:
     case Op_CallDynamicJava:
       // Calling Java code so use Java calling convention
-      save_policy = matcher._register_save_policy;
+      save_policy = _matcher._register_save_policy;
       break;
 
     default:
@@ -674,44 +674,46 @@
 
 //------------------------------schedule_local---------------------------------
 // Topological sort within a block.  Someday become a real scheduler.
-bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
+bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
   // Already "sorted" are the block start Node (as the first entry), and
   // the block-ending Node and any trailing control projections.  We leave
   // these alone.  PhiNodes and ParmNodes are made to follow the block start
   // Node.  Everything else gets topo-sorted.
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
-      tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
-      for (uint i = 0;i < _nodes.size();i++) {
+    if (trace_opto_pipelining()) {
+      tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
+      for (uint i = 0;i < block->number_of_nodes(); i++) {
         tty->print("# ");
-        _nodes[i]->fast_dump();
+        block->get_node(i)->fast_dump();
       }
       tty->print_cr("#");
     }
 #endif
 
   // RootNode is already sorted
-  if( _nodes.size() == 1 ) return true;
+  if (block->number_of_nodes() == 1) {
+    return true;
+  }
 
   // Move PhiNodes and ParmNodes from 1 to cnt up to the start
-  uint node_cnt = end_idx();
+  uint node_cnt = block->end_idx();
   uint phi_cnt = 1;
   uint i;
   for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
-    Node *n = _nodes[i];
+    Node *n = block->get_node(i);
     if( n->is_Phi() ||          // Found a PhiNode or ParmNode
-        (n->is_Proj()  && n->in(0) == head()) ) {
+        (n->is_Proj()  && n->in(0) == block->head()) ) {
       // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
-      _nodes.map(i,_nodes[phi_cnt]);
-      _nodes.map(phi_cnt++,n);  // swap Phi/Parm up front
+      block->map_node(block->get_node(phi_cnt), i);
+      block->map_node(n, phi_cnt++);  // swap Phi/Parm up front
     } else {                    // All others
       // Count block-local inputs to 'n'
       uint cnt = n->len();      // Input count
       uint local = 0;
       for( uint j=0; j<cnt; j++ ) {
         Node *m = n->in(j);
-        if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
+        if( m && get_block_for_node(m) == block && !m->is_top() )
           local++;              // One more block-local input
       }
       ready_cnt.at_put(n->_idx, local); // Count em up
@@ -723,7 +725,7 @@
           for (uint prec = n->req(); prec < n->len(); prec++) {
             Node* oop_store = n->in(prec);
             if (oop_store != NULL) {
-              assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+              assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
             }
           }
         }
@@ -747,16 +749,16 @@
       }
     }
   }
-  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
-    ready_cnt.at_put(_nodes[i2]->_idx, 0);
+  for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
+    ready_cnt.at_put(block->get_node(i2)->_idx, 0);
 
   // All the prescheduled guys do not hold back internal nodes
   uint i3;
   for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
-    Node *n = _nodes[i3];       // Get pre-scheduled
+    Node *n = block->get_node(i3);       // Get pre-scheduled
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j);
-      if (cfg->get_block_for_node(m) == this) { // Local-block user
+      if (get_block_for_node(m) == block) { // Local-block user
         int m_cnt = ready_cnt.at(m->_idx)-1;
         ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
       }
@@ -767,7 +769,7 @@
   // Make a worklist
   Node_List worklist;
   for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
-    Node *m = _nodes[i4];
+    Node *m = block->get_node(i4);
     if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
       if (m->is_iteratively_computed()) {
         // Push induction variable increments last to allow other uses
@@ -789,15 +791,15 @@
   }
 
   // Warm up the 'next_call' heuristic bits
-  needed_for_next_call(_nodes[0], next_call, cfg);
+  needed_for_next_call(block, block->head(), next_call);
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
-      for (uint j=0; j<_nodes.size(); j++) {
-        Node     *n = _nodes[j];
+    if (trace_opto_pipelining()) {
+      for (uint j=0; j< block->number_of_nodes(); j++) {
+        Node     *n = block->get_node(j);
         int     idx = n->_idx;
         tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
-        tty->print("latency:%3d  ", cfg->get_latency_for_node(n));
+        tty->print("latency:%3d  ", get_latency_for_node(n));
         tty->print("%4d: %s\n", idx, n->Name());
       }
     }
@@ -808,7 +810,7 @@
   while( worklist.size() ) {    // Worklist is not ready
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
+    if (trace_opto_pipelining()) {
       tty->print("#   ready list:");
       for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
         Node *n = worklist[i];      // Get Node on worklist
@@ -819,13 +821,13 @@
 #endif
 
     // Select and pop a ready guy from worklist
-    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
-    _nodes.map(phi_cnt++,n);    // Schedule him next
+    Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
+    block->map_node(n, phi_cnt++);    // Schedule him next
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
+    if (trace_opto_pipelining()) {
       tty->print("#    select %d: %s", n->_idx, n->Name());
-      tty->print(", latency:%d", cfg->get_latency_for_node(n));
+      tty->print(", latency:%d", get_latency_for_node(n));
       n->dump();
       if (Verbose) {
         tty->print("#   ready list:");
@@ -840,26 +842,26 @@
 #endif
     if( n->is_MachCall() ) {
       MachCallNode *mcall = n->as_MachCall();
-      phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
+      phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
       continue;
     }
 
     if (n->is_Mach() && n->as_Mach()->has_call()) {
       RegMask regs;
-      regs.Insert(matcher.c_frame_pointer());
+      regs.Insert(_matcher.c_frame_pointer());
       regs.OR(n->out_RegMask());
 
-      MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
-      cfg->map_node_to_block(proj, this);
-      _nodes.insert(phi_cnt++, proj);
+      MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
+      map_node_to_block(proj, block);
+      block->insert_node(proj, phi_cnt++);
 
-      add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
+      add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
     }
 
     // Children are now all ready
     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
       Node* m = n->fast_out(i5); // Get user
-      if (cfg->get_block_for_node(m) != this) {
+      if (get_block_for_node(m) != block) {
         continue;
       }
       if( m->is_Phi() ) continue;
@@ -874,9 +876,8 @@
     }
   }
 
-  if( phi_cnt != end_idx() ) {
+  if( phi_cnt != block->end_idx() ) {
     // did not schedule all.  Retry, Bailout, or Die
-    Compile* C = matcher.C;
     if (C->subsume_loads() == true && !C->failing()) {
       // Retry with subsume_loads == false
       // If this is the first failure, the sentinel string will "stick"
@@ -888,12 +889,12 @@
   }
 
 #ifndef PRODUCT
-  if (cfg->trace_opto_pipelining()) {
+  if (trace_opto_pipelining()) {
     tty->print_cr("#");
     tty->print_cr("# after schedule_local");
-    for (uint i = 0;i < _nodes.size();i++) {
+    for (uint i = 0;i < block->number_of_nodes();i++) {
       tty->print("# ");
-      _nodes[i]->fast_dump();
+      block->get_node(i)->fast_dump();
     }
     tty->cr();
   }
@@ -919,7 +920,7 @@
 }
 
 //------------------------------catch_cleanup_find_cloned_def------------------
-static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
+Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
   assert( use_blk != def_blk, "Inter-block cleanup only");
 
   // The use is some block below the Catch.  Find and return the clone of the def
@@ -945,14 +946,14 @@
     // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
     Node_Array inputs = new Node_List(Thread::current()->resource_area());
     for(uint k = 1; k < use_blk->num_preds(); k++) {
-      Block* block = cfg->get_block_for_node(use_blk->pred(k));
-      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
+      Block* block = get_block_for_node(use_blk->pred(k));
+      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
     }
 
     // Check to see if the use_blk already has an identical phi inserted.
     // If it exists, it will be at the first position since all uses of a
     // def are processed together.
-    Node *phi = use_blk->_nodes[1];
+    Node *phi = use_blk->get_node(1);
     if( phi->is_Phi() ) {
       fixup = phi;
       for (uint k = 1; k < use_blk->num_preds(); k++) {
@@ -967,8 +968,8 @@
     // If an existing PhiNode was not found, make a new one.
     if (fixup == NULL) {
       Node *new_phi = PhiNode::make(use_blk->head(), def);
-      use_blk->_nodes.insert(1, new_phi);
-      cfg->map_node_to_block(new_phi, use_blk);
+      use_blk->insert_node(new_phi, 1);
+      map_node_to_block(new_phi, use_blk);
       for (uint k = 1; k < use_blk->num_preds(); k++) {
         new_phi->set_req(k, inputs[k]);
       }
@@ -977,7 +978,7 @@
 
   } else {
     // Found the use just below the Catch.  Make it use the clone.
-    fixup = use_blk->_nodes[n_clone_idx];
+    fixup = use_blk->get_node(n_clone_idx);
   }
 
   return fixup;
@@ -997,36 +998,36 @@
   for( uint k = 0; k < blk->_num_succs; k++ ) {
     // Get clone in each successor block
     Block *sb = blk->_succs[k];
-    Node *clone = sb->_nodes[offset_idx+1];
+    Node *clone = sb->get_node(offset_idx+1);
     assert( clone->Opcode() == use->Opcode(), "" );
 
     // Make use-clone reference the def-clone
-    catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
+    catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
   }
 }
 
 //------------------------------catch_cleanup_inter_block---------------------
 // Fix all input edges in use that reference "def".  The use is in a different
 // block than the def.
-static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
+void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
   if( !use_blk ) return;        // Can happen if the use is a precedence edge
 
-  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
+  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
   catch_cleanup_fix_all_inputs(use, def, new_def);
 }
 
 //------------------------------call_catch_cleanup-----------------------------
 // If we inserted any instructions between a Call and his CatchNode,
 // clone the instructions on all paths below the Catch.
-void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
+void PhaseCFG::call_catch_cleanup(Block* block) {
 
   // End of region to clone
-  uint end = end_idx();
-  if( !_nodes[end]->is_Catch() ) return;
+  uint end = block->end_idx();
+  if( !block->get_node(end)->is_Catch() ) return;
   // Start of region to clone
   uint beg = end;
-  while(!_nodes[beg-1]->is_MachProj() ||
-        !_nodes[beg-1]->in(0)->is_MachCall() ) {
+  while(!block->get_node(beg-1)->is_MachProj() ||
+        !block->get_node(beg-1)->in(0)->is_MachCall() ) {
     beg--;
     assert(beg > 0,"Catch cleanup walking beyond block boundary");
   }
@@ -1035,15 +1036,15 @@
 
   // Clone along all Catch output paths.  Clone area between the 'beg' and
   // 'end' indices.
-  for( uint i = 0; i < _num_succs; i++ ) {
-    Block *sb = _succs[i];
+  for( uint i = 0; i < block->_num_succs; i++ ) {
+    Block *sb = block->_succs[i];
     // Clone the entire area; ignoring the edge fixup for now.
     for( uint j = end; j > beg; j-- ) {
       // It is safe here to clone a node with anti_dependence
       // since clones dominate on each path.
-      Node *clone = _nodes[j-1]->clone();
-      sb->_nodes.insert( 1, clone );
-      cfg->map_node_to_block(clone, sb);
+      Node *clone = block->get_node(j-1)->clone();
+      sb->insert_node(clone, 1);
+      map_node_to_block(clone, sb);
     }
   }
 
@@ -1051,7 +1052,7 @@
   // Fixup edges.  Check the def-use info per cloned Node
   for(uint i2 = beg; i2 < end; i2++ ) {
     uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
-    Node *n = _nodes[i2];        // Node that got cloned
+    Node *n = block->get_node(i2);        // Node that got cloned
     // Need DU safe iterator because of edge manipulation in calls.
     Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
     for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
@@ -1060,19 +1061,19 @@
     uint max = out->size();
     for (uint j = 0; j < max; j++) {// For all users
       Node *use = out->pop();
-      Block *buse = cfg->get_block_for_node(use);
+      Block *buse = get_block_for_node(use);
       if( use->is_Phi() ) {
         for( uint k = 1; k < use->req(); k++ )
           if( use->in(k) == n ) {
-            Block* block = cfg->get_block_for_node(buse->pred(k));
-            Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
+            Block* b = get_block_for_node(buse->pred(k));
+            Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
             use->set_req(k, fixup);
           }
       } else {
-        if (this == buse) {
-          catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
+        if (block == buse) {
+          catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
         } else {
-          catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
+          catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
         }
       }
     } // End for all users
@@ -1081,30 +1082,30 @@
 
   // Remove the now-dead cloned ops
   for(uint i3 = beg; i3 < end; i3++ ) {
-    _nodes[beg]->disconnect_inputs(NULL, C);
-    _nodes.remove(beg);
+    block->get_node(beg)->disconnect_inputs(NULL, C);
+    block->remove_node(beg);
   }
 
   // If the successor blocks have a CreateEx node, move it back to the top
-  for(uint i4 = 0; i4 < _num_succs; i4++ ) {
-    Block *sb = _succs[i4];
+  for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
+    Block *sb = block->_succs[i4];
     uint new_cnt = end - beg;
     // Remove any newly created, but dead, nodes.
     for( uint j = new_cnt; j > 0; j-- ) {
-      Node *n = sb->_nodes[j];
+      Node *n = sb->get_node(j);
       if (n->outcnt() == 0 &&
           (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
         n->disconnect_inputs(NULL, C);
-        sb->_nodes.remove(j);
+        sb->remove_node(j);
         new_cnt--;
       }
     }
     // If any newly created nodes remain, move the CreateEx node to the top
     if (new_cnt > 0) {
-      Node *cex = sb->_nodes[1+new_cnt];
+      Node *cex = sb->get_node(1+new_cnt);
       if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
-        sb->_nodes.remove(1+new_cnt);
-        sb->_nodes.insert(1,cex);
+        sb->remove_node(1+new_cnt);
+        sb->insert_node(cex, 1);
       }
     }
   }
--- a/hotspot/src/share/vm/opto/library_call.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -2756,10 +2756,28 @@
       newval = _gvn.makecon(TypePtr::NULL_PTR);
 
     // Reference stores need a store barrier.
-    pre_barrier(true /* do_load*/,
-                control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
-                NULL /* pre_val*/,
-                T_OBJECT);
+    if (kind == LS_xchg) {
+      // If the pre-barrier must execute before the oop store, the old value requires a do_load here.
+      if (!can_move_pre_barrier()) {
+        pre_barrier(true /* do_load*/,
+                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+                    NULL /* pre_val*/,
+                    T_OBJECT);
+      } // Else move pre_barrier to use load_store value, see below.
+    } else if (kind == LS_cmpxchg) {
+      // Same as for newval above:
+      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
+        oldval = _gvn.makecon(TypePtr::NULL_PTR);
+      }
+      // The only known value which might get overwritten is oldval.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  oldval /* pre_val */,
+                  T_OBJECT);
+    } else {
+      ShouldNotReachHere();
+    }
+
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@@ -2795,16 +2813,27 @@
   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
   set_memory(proj, alias_idx);
 
+  if (type == T_OBJECT && kind == LS_xchg) {
+#ifdef _LP64
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
+    }
+#endif
+    if (can_move_pre_barrier()) {
+      // Don't need to load pre_val. The old value is returned by load_store.
+      // The pre_barrier can execute after the xchg as long as no safepoint
+      // gets inserted between them.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  load_store /* pre_val */,
+                  T_OBJECT);
+    }
+  }
+
   // Add the trailing membar surrounding the access
   insert_mem_bar(Op_MemBarCPUOrder);
   insert_mem_bar(Op_MemBarAcquire);
 
-#ifdef _LP64
-  if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
-    load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
-  }
-#endif
-
   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
   set_result(load_store);
   return true;
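
For context, the LS_xchg and LS_cmpxchg cases above correspond to the object getAndSet and compareAndSwap intrinsics. The following is a minimal, illustrative Java sketch (not part of this changeset; the class name and warm-up count are assumptions) of the kind of call sites that reach these paths once C2 compiles the loop, e.g. when run with -XX:+UseG1GC -Xbatch.

    // Illustrative only: exercises the object getAndSet (LS_xchg) and
    // compareAndSet (LS_cmpxchg) intrinsics once the loop is C2-compiled.
    import java.util.concurrent.atomic.AtomicReference;

    public class ObjectXchgExample {
        static final AtomicReference<Object> ref = new AtomicReference<>(new Object());

        public static void main(String[] args) {
            Object last = null;
            for (int i = 0; i < 200_000; i++) {       // warm-up count is an assumption
                Object next = new Object();
                last = ref.getAndSet(next);           // old value comes back as the result (LS_xchg)
                ref.compareAndSet(next, last);        // expected old value is an operand (LS_cmpxchg)
            }
            System.out.println(last != null);
        }
    }
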
--- a/hotspot/src/share/vm/opto/live.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/live.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -85,8 +85,8 @@
     IndexSet* def = &_defs[block->_pre_order-1];
     DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
     uint i;
-    for (i = block->_nodes.size(); i > 1; i--) {
-      Node* n = block->_nodes[i-1];
+    for (i = block->number_of_nodes(); i > 1; i--) {
+      Node* n = block->get_node(i-1);
       if (n->is_Phi()) {
         break;
       }
@@ -112,7 +112,7 @@
 #endif
     // Remove anything defined by Phis and the block start instruction
     for (uint k = i; k > 0; k--) {
-      uint r = _names[block->_nodes[k - 1]->_idx];
+      uint r = _names[block->get_node(k - 1)->_idx];
       def->insert(r);
       use->remove(r);
     }
@@ -124,7 +124,7 @@
 
       // PhiNode uses go in the live-out set of prior blocks.
       for (uint k = i; k > 0; k--) {
-        add_liveout(p, _names[block->_nodes[k-1]->in(l)->_idx], first_pass);
+        add_liveout(p, _names[block->get_node(k-1)->in(l)->_idx], first_pass);
       }
     }
     freeset(block);
@@ -254,10 +254,10 @@
 void PhaseLive::dump( const Block *b ) const {
   tty->print("Block %d: ",b->_pre_order);
   tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
-  uint cnt = b->_nodes.size();
+  uint cnt = b->number_of_nodes();
   for( uint i=0; i<cnt; i++ ) {
-    tty->print("L%d/", _names[b->_nodes[i]->_idx] );
-    b->_nodes[i]->dump();
+    tty->print("L%d/", _names[b->get_node(i)->_idx] );
+    b->get_node(i)->dump();
   }
   tty->print("\n");
 }
@@ -269,7 +269,7 @@
   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
     Block* block = _cfg.get_block(i);
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j-1];
+      Node* n = block->get_node(j-1);
       if (n->is_Phi()) {
         break;
       }
--- a/hotspot/src/share/vm/opto/macro.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -72,6 +72,8 @@
   int jvms_adj  = new_dbg_start - old_dbg_start;
   assert (new_dbg_start == newcall->req(), "argument count mismatch");
 
+  // A SafePointScalarObject node can be referenced several times in debug info.
+  // Use a Dict to record cloned nodes.
   Dict* sosn_map = new Dict(cmpkey,hashkey);
   for (uint i = old_dbg_start; i < oldcall->req(); i++) {
     Node* old_in = oldcall->in(i);
@@ -79,8 +81,8 @@
     if (old_in != NULL && old_in->is_SafePointScalarObject()) {
       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
       uint old_unique = C->unique();
-      Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
-      if (old_unique != C->unique()) {
+      Node* new_in = old_sosn->clone(sosn_map);
+      if (old_unique != C->unique()) { // New node?
         new_in->set_req(0, C->root()); // reset control edge
         new_in = transform_later(new_in); // Register new node.
       }
@@ -725,7 +727,11 @@
   while (safepoints.length() > 0) {
     SafePointNode* sfpt = safepoints.pop();
     Node* mem = sfpt->memory();
-    uint first_ind = sfpt->req();
+    assert(sfpt->jvms() != NULL, "missed JVMS");
+    // Fields of scalar objs are referenced only at the end
+    // of regular debuginfo at the last (youngest) JVMS.
+    // Record relative start index.
+    uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
     SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
 #ifdef ASSERT
                                                  alloc,
@@ -799,7 +805,7 @@
           for (int i = start; i < end; i++) {
             if (sfpt_done->in(i)->is_SafePointScalarObject()) {
               SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
-              if (scobj->first_index() == sfpt_done->req() &&
+              if (scobj->first_index(jvms) == sfpt_done->req() &&
                   scobj->n_fields() == (uint)nfields) {
                 assert(scobj->alloc() == alloc, "sanity");
                 sfpt_done->set_req(i, res);
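
The first_index bookkeeping above only matters for allocations that C2 scalar-replaces and then has to describe in debug info at safepoints. A minimal sketch of Java code with that shape (illustrative names, not taken from this changeset) is:

    public class ScalarReplaceExample {
        static final class Point {
            final int x, y;
            Point(int x, int y) { this.x = x; this.y = y; }
        }

        // The Point never escapes, so C2 can typically scalar-replace it; its fields
        // then have to be recorded in debug info at safepoints for deoptimization.
        static int sum(int a, int b) {
            Point p = new Point(a, b);
            return p.x + p.y;
        }

        public static void main(String[] args) {
            long total = 0;
            for (int i = 0; i < 1_000_000; i++) {   // warm up so sum() gets C2-compiled
                total += sum(i, i + 1);
            }
            System.out.println(total);
        }
    }
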
--- a/hotspot/src/share/vm/opto/node.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/node.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -773,6 +773,21 @@
   _in[_cnt] = NULL;       // NULL out emptied slot
 }
 
+//------------------------------del_req_ordered--------------------------------
+// Delete the required edge and compact the edge array with preserved order
+void Node::del_req_ordered( uint idx ) {
+  assert( idx < _cnt, "oob");
+  assert( !VerifyHashTableKeys || _hash_lock == 0,
+          "remove node from hash table before modifying it");
+  // First remove corresponding def-use edge
+  Node *n = in(idx);
+  if (n != NULL) n->del_out((Node *)this);
+  if (idx < _cnt - 1) { // Not last edge ?
+    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
+  }
+  _in[--_cnt] = NULL;   // NULL out emptied slot
+}
+
 //------------------------------ins_req----------------------------------------
 // Insert a new required input at the end
 void Node::ins_req( uint idx, Node *n ) {
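
For readers unfamiliar with the edge arrays: the existing del_req fills the vacated slot with the last input, which reorders the remaining edges, while del_req_ordered above shifts the tail down instead. A plain-Java analogy (assumed names, not VM code):

    import java.util.Arrays;

    public class EdgeRemovalSketch {
        // Swap-with-last: O(1), but the remaining elements change order (del_req style).
        static int removeSwap(Object[] in, int cnt, int idx) {
            in[idx] = in[--cnt];
            in[cnt] = null;
            return cnt;
        }

        // Shift the tail down by one, preserving order (del_req_ordered style,
        // the role played by Copy::conjoint_words_to_lower above).
        static int removeOrdered(Object[] in, int cnt, int idx) {
            System.arraycopy(in, idx + 1, in, idx, cnt - idx - 1);
            in[--cnt] = null;
            return cnt;
        }

        public static void main(String[] args) {
            Object[] swap    = {"A", "B", "C", "D", null};
            Object[] ordered = {"A", "B", "C", "D", null};
            removeSwap(swap, 4, 1);        // swap    -> [A, D, C, null, null]
            removeOrdered(ordered, 4, 1);  // ordered -> [A, C, D, null, null]
            System.out.println(Arrays.toString(swap) + " vs " + Arrays.toString(ordered));
        }
    }
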
--- a/hotspot/src/share/vm/opto/node.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -384,6 +384,7 @@
   void add_req( Node *n ); // Append a NEW required input
   void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
   void del_req( uint idx ); // Delete required edge & compact
+  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
   void ins_req( uint i, Node *n ); // Insert a NEW required input
   void set_req( uint i, Node *n ) {
     assert( is_not_dead(n), "can not use dead node");
--- a/hotspot/src/share/vm/opto/output.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -57,7 +57,7 @@
 // Convert Nodes to instruction bits and pass off to the VM
 void Compile::Output() {
   // RootNode goes
-  assert( _cfg->get_root_block()->_nodes.size() == 0, "" );
+  assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
 
   // The number of new nodes (mostly MachNop) is proportional to
   // the number of java calls and inner loops which are aligned.
@@ -70,11 +70,11 @@
   Block *entry = _cfg->get_block(1);
   Block *broot = _cfg->get_root_block();
 
-  const StartNode *start = entry->_nodes[0]->as_Start();
+  const StartNode *start = entry->head()->as_Start();
 
   // Replace StartNode with prolog
   MachPrologNode *prolog = new (this) MachPrologNode();
-  entry->_nodes.map( 0, prolog );
+  entry->map_node(prolog, 0);
   _cfg->map_node_to_block(prolog, entry);
   _cfg->unmap_node_from_block(start); // start is no longer in any block
 
@@ -144,8 +144,8 @@
     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
       tty->print("\nBB#%03d:\n", i);
       Block* block = _cfg->get_block(i);
-      for (uint j = 0; j < block->_nodes.size(); j++) {
-        Node* n = block->_nodes[j];
+      for (uint j = 0; j < block->number_of_nodes(); j++) {
+        Node* n = block->get_node(j);
         OptoReg::Name reg = _regalloc->get_reg_first(n);
         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
         n->dump();
@@ -226,8 +226,8 @@
   // Insert call to zap runtime stub before every node with an oop map
   for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
     Block *b = _cfg->get_block(i);
-    for ( uint j = 0;  j < b->_nodes.size();  ++j ) {
-      Node *n = b->_nodes[j];
+    for ( uint j = 0;  j < b->number_of_nodes();  ++j ) {
+      Node *n = b->get_node(j);
 
       // Determining if we should insert a zap-a-lot node in output.
       // We do that for all nodes that has oopmap info, except for calls
@@ -256,7 +256,7 @@
         }
         if (insert) {
           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
-          b->_nodes.insert( j, zap );
+          b->insert_node(zap, j);
           _cfg->map_node_to_block(zap, b);
           ++j;
         }
@@ -379,10 +379,10 @@
     DEBUG_ONLY( jmp_rule[i]   = 0; )
 
     // Sum all instruction sizes to compute block size
-    uint last_inst = block->_nodes.size();
+    uint last_inst = block->number_of_nodes();
     uint blk_size = 0;
     for (uint j = 0; j < last_inst; j++) {
-      Node* nj = block->_nodes[j];
+      Node* nj = block->get_node(j);
       // Handle machine instruction nodes
       if (nj->is_Mach()) {
         MachNode *mach = nj->as_Mach();
@@ -477,18 +477,18 @@
     for (uint i = 0; i < nblocks; i++) {
       Block* block = _cfg->get_block(i);
       int idx = jmp_nidx[i];
-      MachNode* mach = (idx == -1) ? NULL: block->_nodes[idx]->as_Mach();
+      MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
       if (mach != NULL && mach->may_be_short_branch()) {
 #ifdef ASSERT
         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
         int j;
         // Find the branch; ignore trailing NOPs.
-        for (j = block->_nodes.size()-1; j>=0; j--) {
-          Node* n = block->_nodes[j];
+        for (j = block->number_of_nodes()-1; j>=0; j--) {
+          Node* n = block->get_node(j);
           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
             break;
         }
-        assert(j >= 0 && j == idx && block->_nodes[j] == (Node*)mach, "sanity");
+        assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
 #endif
         int br_size = jmp_size[i];
         int br_offs = blk_starts[i] + jmp_offset[i];
@@ -522,7 +522,7 @@
             diff -= nop_size;
           }
           adjust_block_start += diff;
-          block->_nodes.map(idx, replacement);
+          block->map_node(replacement, idx);
           mach->subsume_by(replacement, C);
           mach = replacement;
           progress = true;
@@ -639,7 +639,7 @@
                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
       Compile::set_sv_for_object_node(objs, sv);
 
-      uint first_ind = spobj->first_index();
+      uint first_ind = spobj->first_index(sfpt->jvms());
       for (uint i = 0; i < spobj->n_fields(); i++) {
         Node* fld_node = sfpt->in(first_ind+i);
         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
@@ -894,7 +894,7 @@
     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
 
     // Loop over monitors and insert into array
-    for(idx = 0; idx < num_mon; idx++) {
+    for (idx = 0; idx < num_mon; idx++) {
       // Grab the node that defines this monitor
       Node* box_node = sfn->monitor_box(jvms, idx);
       Node* obj_node = sfn->monitor_obj(jvms, idx);
@@ -902,11 +902,11 @@
       // Create ScopeValue for object
       ScopeValue *scval = NULL;
 
-      if( obj_node->is_SafePointScalarObject() ) {
+      if (obj_node->is_SafePointScalarObject()) {
         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
         scval = Compile::sv_for_node_id(objs, spobj->_idx);
         if (scval == NULL) {
-          const Type *t = obj_node->bottom_type();
+          const Type *t = spobj->bottom_type();
           ciKlass* cik = t->is_oopptr()->klass();
           assert(cik->is_instance_klass() ||
                  cik->is_array_klass(), "Not supported allocation.");
@@ -914,14 +914,14 @@
                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
           Compile::set_sv_for_object_node(objs, sv);
 
-          uint first_ind = spobj->first_index();
+          uint first_ind = spobj->first_index(youngest_jvms);
           for (uint i = 0; i < spobj->n_fields(); i++) {
             Node* fld_node = sfn->in(first_ind+i);
             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
           }
           scval = sv;
         }
-      } else if( !obj_node->is_Con() ) {
+      } else if (!obj_node->is_Con()) {
         OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
         if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
           scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
@@ -1088,8 +1088,8 @@
     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
       Block* b = _cfg->get_block(i);
 
-      for (uint j = 0; j < b->_nodes.size(); j++) {
-        Node* n = b->_nodes[j];
+      for (uint j = 0; j < b->number_of_nodes(); j++) {
+        Node* n = b->get_node(j);
 
         // If the node is a MachConstantNode evaluate the constant
         // value section.
@@ -1247,14 +1247,14 @@
     // Define the label at the beginning of the basic block
     MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
 
-    uint last_inst = block->_nodes.size();
+    uint last_inst = block->number_of_nodes();
 
     // Emit block normally, except for last instruction.
     // Emit means "dump code bits into code buffer".
     for (uint j = 0; j<last_inst; j++) {
 
       // Get the node
-      Node* n = block->_nodes[j];
+      Node* n = block->get_node(j);
 
       // See if delay slots are supported
       if (valid_bundle_info(n) &&
@@ -1308,7 +1308,7 @@
           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
           int nops_cnt = padding / nop_size;
           MachNode *nop = new (this) MachNopNode(nops_cnt);
-          block->_nodes.insert(j++, nop);
+          block->insert_node(nop, j++);
           last_inst++;
           _cfg->map_node_to_block(nop, block);
           nop->emit(*cb, _regalloc);
@@ -1394,7 +1394,7 @@
               // Insert padding between avoid_back_to_back branches.
               if (needs_padding && replacement->avoid_back_to_back()) {
                 MachNode *nop = new (this) MachNopNode();
-                block->_nodes.insert(j++, nop);
+                block->insert_node(nop, j++);
                 _cfg->map_node_to_block(nop, block);
                 last_inst++;
                 nop->emit(*cb, _regalloc);
@@ -1407,7 +1407,7 @@
               jmp_size[i]   = new_size;
               jmp_rule[i]   = mach->rule();
 #endif
-              block->_nodes.map(j, replacement);
+              block->map_node(replacement, j);
               mach->subsume_by(replacement, C);
               n    = replacement;
               mach = replacement;
@@ -1438,7 +1438,7 @@
             count++;
             uint i4;
             for (i4 = 0; i4 < last_inst; ++i4) {
-              if (block->_nodes[i4] == oop_store) {
+              if (block->get_node(i4) == oop_store) {
                 break;
               }
             }
@@ -1548,7 +1548,7 @@
       int padding = nb->alignment_padding(current_offset);
       if( padding > 0 ) {
         MachNode *nop = new (this) MachNopNode(padding / nop_size);
-        block->_nodes.insert(block->_nodes.size(), nop);
+        block->insert_node(nop, block->number_of_nodes());
         _cfg->map_node_to_block(nop, block);
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
@@ -1655,8 +1655,8 @@
     int j;
 
     // Find the branch; ignore trailing NOPs.
-    for (j = block->_nodes.size() - 1; j >= 0; j--) {
-      n = block->_nodes[j];
+    for (j = block->number_of_nodes() - 1; j >= 0; j--) {
+      n = block->get_node(j);
       if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
         break;
       }
@@ -1675,8 +1675,8 @@
       uint call_return = call_returns[block->_pre_order];
 #ifdef ASSERT
       assert( call_return > 0, "no call seen for this basic block" );
-      while (block->_nodes[--j]->is_MachProj()) ;
-      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+      while (block->get_node(--j)->is_MachProj()) ;
+      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
 #endif
       // last instruction is a CatchNode, find it's CatchProjNodes
       int nof_succs = block->_num_succs;
@@ -1782,7 +1782,7 @@
   // Get the last node
   Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
 
-  _next_node = block->_nodes[block->_nodes.size() - 1];
+  _next_node = block->get_node(block->number_of_nodes() - 1);
 }
 
 #ifndef PRODUCT
@@ -1875,7 +1875,7 @@
     // Used to allow latency 0 to force an instruction to the beginning
     // of the bb
     uint latency = 1;
-    Node *use = bb->_nodes[j];
+    Node *use = bb->get_node(j);
     uint nlen = use->len();
 
     // Walk over all the inputs
@@ -2286,7 +2286,7 @@
        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
 
     // Push any trailing projections
-    if( bb->_nodes[bb->_nodes.size()-1] != n ) {
+    if( bb->get_node(bb->number_of_nodes()-1) != n ) {
       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
         Node *foi = n->fast_out(i);
         if( foi->is_Proj() )
@@ -2329,21 +2329,21 @@
   _unconditional_delay_slot = NULL;
 
 #ifdef ASSERT
-  for( uint i=0; i < bb->_nodes.size(); i++ )
-    assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
+  for( uint i=0; i < bb->number_of_nodes(); i++ )
+    assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
 #endif
 
   // Force the _uses count to never go to zero for unscheduable pieces
   // of the block
   for( uint k = 0; k < _bb_start; k++ )
-    _uses[bb->_nodes[k]->_idx] = 1;
-  for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
-    _uses[bb->_nodes[l]->_idx] = 1;
+    _uses[bb->get_node(k)->_idx] = 1;
+  for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
+    _uses[bb->get_node(l)->_idx] = 1;
 
   // Iterate backwards over the instructions in the block.  Don't count the
   // branch projections at end or the block header instructions.
   for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
-    Node *n = bb->_nodes[j];
+    Node *n = bb->get_node(j);
     if( n->is_Proj() ) continue; // Projections handled another way
 
     // Account for all uses
@@ -2398,8 +2398,8 @@
 #ifndef PRODUCT
     if (_cfg->C->trace_opto_output()) {
       tty->print("#  Schedule BB#%03d (initial)\n", i);
-      for (uint j = 0; j < bb->_nodes.size(); j++) {
-        bb->_nodes[j]->dump();
+      for (uint j = 0; j < bb->number_of_nodes(); j++) {
+        bb->get_node(j)->dump();
       }
     }
 #endif
@@ -2426,10 +2426,10 @@
     }
 
     // Leave untouched the starting instruction, any Phis, a CreateEx node
-    // or Top.  bb->_nodes[_bb_start] is the first schedulable instruction.
-    _bb_end = bb->_nodes.size()-1;
+    // or Top.  bb->get_node(_bb_start) is the first schedulable instruction.
+    _bb_end = bb->number_of_nodes()-1;
     for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
-      Node *n = bb->_nodes[_bb_start];
+      Node *n = bb->get_node(_bb_start);
       // Things not matched, like Phinodes and ProjNodes don't get scheduled.
       // Also, MachIdealNodes do not get scheduled
       if( !n->is_Mach() ) continue;     // Skip non-machine nodes
@@ -2449,19 +2449,19 @@
     // in the block), because they have delay slots we can fill.  Calls all
     // have their delay slots filled in the template expansions, so we don't
     // bother scheduling them.
-    Node *last = bb->_nodes[_bb_end];
+    Node *last = bb->get_node(_bb_end);
     // Ignore trailing NOPs.
     while (_bb_end > 0 && last->is_Mach() &&
            last->as_Mach()->ideal_Opcode() == Op_Con) {
-      last = bb->_nodes[--_bb_end];
+      last = bb->get_node(--_bb_end);
     }
     assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
     if( last->is_Catch() ||
        // Exclude unreachable path case when Halt node is in a separate block.
        (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
       // There must be a prior call.  Skip it.
-      while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
-        assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
+      while( !bb->get_node(--_bb_end)->is_MachCall() ) {
+        assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
       }
     } else if( last->is_MachNullCheck() ) {
       // Backup so the last null-checked memory instruction is
@@ -2470,7 +2470,7 @@
       Node *mem = last->in(1);
       do {
         _bb_end--;
-      } while (mem != bb->_nodes[_bb_end]);
+      } while (mem != bb->get_node(_bb_end));
     } else {
       // Set _bb_end to point after last schedulable inst.
       _bb_end++;
@@ -2499,7 +2499,7 @@
     assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
 #ifdef ASSERT
     for( uint l = _bb_start; l < _bb_end; l++ ) {
-      Node *n = bb->_nodes[l];
+      Node *n = bb->get_node(l);
       uint m;
       for( m = 0; m < _bb_end-_bb_start; m++ )
         if( _scheduled[m] == n )
@@ -2510,14 +2510,14 @@
 
     // Now copy the instructions (in reverse order) back to the block
     for ( uint k = _bb_start; k < _bb_end; k++ )
-      bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
+      bb->map_node(_scheduled[_bb_end-k-1], k);
 
 #ifndef PRODUCT
     if (_cfg->C->trace_opto_output()) {
       tty->print("#  Schedule BB#%03d (final)\n", i);
       uint current = 0;
-      for (uint j = 0; j < bb->_nodes.size(); j++) {
-        Node *n = bb->_nodes[j];
+      for (uint j = 0; j < bb->number_of_nodes(); j++) {
+        Node *n = bb->get_node(j);
         if( valid_bundle_info(n) ) {
           Bundle *bundle = node_bundling(n);
           if (bundle->instr_count() > 0 || bundle->flags() > 0) {
@@ -2579,8 +2579,8 @@
   // Walk over the block backwards.  Check to make sure each DEF doesn't
   // kill a live value (other than the one it's supposed to).  Add each
   // USE to the live set.
-  for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
-    Node *n = b->_nodes[i];
+  for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
+    Node *n = b->get_node(i);
     int n_op = n->Opcode();
     if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
@@ -2711,7 +2711,7 @@
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
       // Insert the pinch-point in the block just after the last use
-      b->_nodes.insert(b->find_node(use)+1,pinch);
+      b->insert_node(pinch, b->find_node(use) + 1);
       _bb_end++;                // Increase size scheduled region in block
     }
 
@@ -2763,10 +2763,10 @@
   // it being in the current block.
   bool fat_proj_seen = false;
   uint last_safept = _bb_end-1;
-  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
+  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
   Node* last_safept_node = end_node;
   for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
     if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
@@ -2815,7 +2815,7 @@
     // Do not allow defs of new derived values to float above GC
     // points unless the base is definitely available at the GC point.
 
-    Node *m = b->_nodes[i];
+    Node *m = b->get_node(i);
 
     // Add precedence edge from following safepoint to use of derived pointer
     if( last_safept_node != end_node &&
@@ -2832,11 +2832,11 @@
 
     if( n->jvms() ) {           // Precedence edge from derived to safept
       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
-      if( b->_nodes[last_safept] != last_safept_node ) {
+      if( b->get_node(last_safept) != last_safept_node ) {
         last_safept = b->find_node(last_safept_node);
       }
       for( uint j=last_safept; j > i; j-- ) {
-        Node *mach = b->_nodes[j];
+        Node *mach = b->get_node(j);
         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
           mach->add_prec( n );
       }
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1648,10 +1648,10 @@
     bool block_not_printed = true;
 
     // and each instruction within a block
-    uint end_index = block->_nodes.size();
+    uint end_index = block->number_of_nodes();
     // block->end_idx() not valid after PhaseRegAlloc
     for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) {
-      Node     *n = block->_nodes.at(instruction_index);
+      Node     *n = block->get_node(instruction_index);
       if( n->is_Mach() ) {
         MachNode *m = n->as_Mach();
         int deleted_count = 0;
@@ -1673,7 +1673,7 @@
             }
             // Print instructions being deleted
             for( int i = (deleted_count - 1); i >= 0; --i ) {
-              block->_nodes.at(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
+              block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
             }
             tty->print_cr("replaced with");
             // Print new instruction
@@ -1687,11 +1687,11 @@
           //  the node index to live range mappings.)
           uint safe_instruction_index = (instruction_index - deleted_count);
           for( ; (instruction_index > safe_instruction_index); --instruction_index ) {
-            block->_nodes.remove( instruction_index );
+            block->remove_node( instruction_index );
           }
           // install new node after safe_instruction_index
-          block->_nodes.insert( safe_instruction_index + 1, m2 );
-          end_index = block->_nodes.size() - 1; // Recompute new block size
+          block->insert_node(m2, safe_instruction_index + 1);
+          end_index = block->number_of_nodes() - 1; // Recompute new block size
           NOT_PRODUCT( inc_peepholes(); )
         }
       }
--- a/hotspot/src/share/vm/opto/postaloc.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/postaloc.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -423,8 +423,8 @@
 
     // Count of Phis in block
     uint phi_dex;
-    for (phi_dex = 1; phi_dex < block->_nodes.size(); phi_dex++) {
-      Node* phi = block->_nodes[phi_dex];
+    for (phi_dex = 1; phi_dex < block->number_of_nodes(); phi_dex++) {
+      Node* phi = block->get_node(phi_dex);
       if (!phi->is_Phi()) {
         break;
       }
@@ -439,7 +439,7 @@
       Block* pb = _cfg.get_block_for_node(block->pred(j));
       // Remove copies along phi edges
       for (uint k = 1; k < phi_dex; k++) {
-        elide_copy(block->_nodes[k], j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
+        elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
       }
       if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
         // See if this predecessor's mappings have been used by everybody
@@ -510,7 +510,7 @@
     // For all Phi's
     for (j = 1; j < phi_dex; j++) {
       uint k;
-      Node *phi = block->_nodes[j];
+      Node *phi = block->get_node(j);
       uint pidx = _lrg_map.live_range_id(phi);
       OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
 
@@ -522,7 +522,7 @@
           u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
       }
       if (u != NodeSentinel) {    // Junk Phi.  Remove
-        block->_nodes.remove(j--);
+        block->remove_node(j--);
         phi_dex--;
         _cfg.unmap_node_from_block(phi);
         phi->replace_by(u);
@@ -552,8 +552,8 @@
     }
 
     // For all remaining instructions
-    for (j = phi_dex; j < block->_nodes.size(); j++) {
-      Node* n = block->_nodes[j];
+    for (j = phi_dex; j < block->number_of_nodes(); j++) {
+      Node* n = block->get_node(j);
 
       if(n->outcnt() == 0 &&   // Dead?
          n != C->top() &&      // (ignore TOP, it has no du info)
--- a/hotspot/src/share/vm/opto/reg_split.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/opto/reg_split.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -112,17 +112,17 @@
 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
   // Skip intervening ProjNodes.  Do not insert between a ProjNode and
   // its definer.
-  while( i < b->_nodes.size() &&
-         (b->_nodes[i]->is_Proj() ||
-          b->_nodes[i]->is_Phi() ) )
+  while( i < b->number_of_nodes() &&
+         (b->get_node(i)->is_Proj() ||
+          b->get_node(i)->is_Phi() ) )
     i++;
 
   // Do not insert between a call and his Catch
-  if( b->_nodes[i]->is_Catch() ) {
+  if( b->get_node(i)->is_Catch() ) {
     // Put the instruction at the top of the fall-thru block.
     // Find the fall-thru projection
     while( 1 ) {
-      const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
+      const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
       if( cp->_con == CatchProjNode::fall_through_index )
         break;
     }
@@ -131,7 +131,7 @@
     i = 1;                      // Right at start of block
   }
 
-  b->_nodes.insert(i,spill);    // Insert node in block
+  b->insert_node(spill, i);    // Insert node in block
   _cfg.map_node_to_block(spill,  b); // Update node->block mapping to reflect
   // Adjust the point where we go hi-pressure
   if( i <= b->_ihrp_index ) b->_ihrp_index++;
@@ -160,9 +160,9 @@
   // (The implicit_null_check function ensures the use is also dominated
   // by the branch-not-taken block.)
   Node *be = b->end();
-  if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
+  if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
     // Spill goes in the branch-not-taken block
-    b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue];
+    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
     loc = 0;                    // Just past the Region
   }
   assert( loc >= 0, "must insert past block head" );
@@ -450,7 +450,7 @@
 
   // Scan block for 1st use.
   for( uint i = 1; i <= b->end_idx(); i++ ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Ignore PHI use, these can be up or down
     if (n->is_Phi()) {
       continue;
@@ -647,7 +647,7 @@
 
       // check block for appropriate phinode & update edges
       for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
-        n1 = b->_nodes[insidx];
+        n1 = b->get_node(insidx);
         // bail if this is not a phi
         phi = n1->is_Phi() ? n1->as_Phi() : NULL;
         if( phi == NULL ) {
@@ -747,7 +747,7 @@
     //----------Walk Instructions in the Block and Split----------
     // For all non-phi instructions in the block
     for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
-      Node *n = b->_nodes[insidx];
+      Node *n = b->get_node(insidx);
       // Find the defining Node's live range index
       uint defidx = _lrg_map.find_id(n);
       uint cnt = n->req();
@@ -776,7 +776,7 @@
               assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
               n->replace_by(u); // Then replace with unique input
               n->disconnect_inputs(NULL, C);
-              b->_nodes.remove(insidx);
+              b->remove_node(insidx);
               insidx--;
               b->_ihrp_index--;
               b->_fhrp_index--;
@@ -789,12 +789,12 @@
               (b->_reg_pressure < (uint)INTPRESSURE) ||
               b->_ihrp_index > 4000000 ||
               b->_ihrp_index >= b->end_idx() ||
-              !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
+              !b->get_node(b->_ihrp_index)->is_Proj(), "" );
       assert( insidx > b->_fhrp_index ||
               (b->_freg_pressure < (uint)FLOATPRESSURE) ||
               b->_fhrp_index > 4000000 ||
               b->_fhrp_index >= b->end_idx() ||
-              !b->_nodes[b->_fhrp_index]->is_Proj(), "" );
+              !b->get_node(b->_fhrp_index)->is_Proj(), "" );
 
       // ********** Handle Crossing HRP Boundry **********
       if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
@@ -819,7 +819,7 @@
                 // Insert point is just past last use or def in the block
                 int insert_point = insidx-1;
                 while( insert_point > 0 ) {
-                  Node *n = b->_nodes[insert_point];
+                  Node *n = b->get_node(insert_point);
                   // Hit top of block?  Quit going backwards
                   if (n->is_Phi()) {
                     break;
@@ -865,7 +865,7 @@
             }
           }  // end if LRG is UP
         }  // end for all spilling live ranges
-        assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
+        assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
       }  // end if crossing HRP Boundry
 
       // If the LRG index is oob, then this is a new spillcopy, skip it.
@@ -878,7 +878,7 @@
       if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
         n->replace_by( n->in(copyidx) );
         n->set_req( copyidx, NULL );
-        b->_nodes.remove(insidx--);
+        b->remove_node(insidx--);
         b->_ihrp_index--; // Adjust the point where we go hi-pressure
         b->_fhrp_index--;
         continue;
@@ -932,10 +932,10 @@
             // Rematerializable?  Then clone def at use site instead
             // of store/load
             if( def->rematerialize() ) {
-              int old_size = b->_nodes.size();
+              int old_size = b->number_of_nodes();
               def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
               if( !def ) return 0; // Bail out
-              insidx += b->_nodes.size()-old_size;
+              insidx += b->number_of_nodes()-old_size;
             }
 
             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
@@ -1332,8 +1332,8 @@
         // so look at the node before it.
         int insert = pred->end_idx();
         while (insert >= 1 &&
-               pred->_nodes[insert - 1]->is_SpillCopy() &&
-               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
+               pred->get_node(insert - 1)->is_SpillCopy() &&
+               _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
           insert--;
         }
         def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
@@ -1402,7 +1402,7 @@
   for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
     b  = _cfg.get_block(bidx);
     for (insidx = 0; insidx <= b->end_idx(); insidx++) {
-      Node *n = b->_nodes[insidx];
+      Node *n = b->get_node(insidx);
       uint defidx = _lrg_map.find(n);
       assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
       assert(defidx < maxlrg,"Bad live range index in Split");
--- a/hotspot/src/share/vm/runtime/globals.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -205,6 +205,7 @@
 
 #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
 #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
+#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
@@ -260,7 +261,7 @@
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Mon Sep 02 13:13:45 2013 +0200
@@ -57,6 +57,7 @@
 
 #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
+#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
@@ -99,7 +100,7 @@
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
@@ -131,6 +132,7 @@
 
 #define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
 #define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
@@ -204,6 +206,7 @@
           C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+          C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
           C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
 #ifdef COMPILER2
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -1051,7 +1051,8 @@
 
   // Find receiver for non-static call
   if (bc != Bytecodes::_invokestatic &&
-      bc != Bytecodes::_invokedynamic) {
+      bc != Bytecodes::_invokedynamic &&
+      bc != Bytecodes::_invokehandle) {
     // This register map must be update since we need to find the receiver for
     // compiled frames. The receiver might be in a register.
     RegisterMap reg_map2(thread);
@@ -1078,7 +1079,7 @@
 
 #ifdef ASSERT
   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
-  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
+  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
     assert(receiver.not_null(), "should have thrown exception");
     KlassHandle receiver_klass(THREAD, receiver->klass());
     Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -1240,9 +1241,9 @@
 #endif
 
   if (is_virtual) {
-    assert(receiver.not_null(), "sanity check");
+    assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
-    KlassHandle h_klass(THREAD, receiver->klass());
+    KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                      is_optimized, static_bound, virtual_call_info,
                      CHECK_(methodHandle()));
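
The extra _invokehandle cases above concern call sites such as MethodHandle.invokeExact, which the VM rewrites internally to invokehandle and whose receiver is not looked up the way an ordinary virtual call's is. A minimal sketch of such a call site (illustrative names only) is:

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class InvokeHandleExample {
        static int twice(int x) { return 2 * x; }

        public static void main(String[] args) throws Throwable {
            MethodHandle mh = MethodHandles.lookup().findStatic(
                    InvokeHandleExample.class, "twice",
                    MethodType.methodType(int.class, int.class));
            // invokeExact is signature-polymorphic; HotSpot resolves such sites via
            // the invokehandle path touched by the change above.
            int r = (int) mh.invokeExact(21);
            System.out.println(r);   // 42
        }
    }
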
--- a/hotspot/src/share/vm/runtime/thread.cpp	Fri Aug 30 00:29:52 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Mon Sep 02 13:13:45 2013 +0200
@@ -3636,6 +3636,16 @@
   CompileBroker::compilation_init();
 #endif
 
+  if (EnableInvokeDynamic) {
+    // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
+    // It is done after compilers are initialized, because otherwise compilations of
+    // signature polymorphic MH intrinsics can be missed
+    // (see SystemDictionary::find_method_handle_intrinsic).
+    initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
+    initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
+    initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
+  }
+
 #if INCLUDE_MANAGEMENT
   Management::initialize(THREAD);
 #endif // INCLUDE_MANAGEMENT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/gcbarriers/G1CrashTest.java	Mon Sep 02 13:13:45 2013 +0200
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8023472
+ * @summary C2 optimization breaks with G1
+ *
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Dcount=100000 G1CrashTest
+ *
+ * @author pbiswal@palantir.com
+ */
+
+public class G1CrashTest {
+    static Object[] set = new Object[11];
+
+    public static void main(String[] args) throws InterruptedException {
+        for (int j = 0; j < Integer.getInteger("count"); j++) {
+            Object key = new Object();
+            insertKey(key);
+            if (j > set.length / 2) {
+                Object[] oldKeys = set;
+                set = new Object[2 * set.length - 1];
+                for (Object o : oldKeys) {
+                    if (o != null)
+                        insertKey(o);
+                }
+            }
+        }
+    }
+
+    static void insertKey(Object key) {
+        int hash = key.hashCode() & 0x7fffffff;
+        int index = hash % set.length;
+        Object cur = set[index];
+        if (cur == null)
+            set[index] = key;
+        else
+            insertKeyRehash(key, index, hash, cur);
+    }
+
+    static void insertKeyRehash(Object key, int index, int hash, Object cur) {
+        int loopIndex = index;
+        int firstRemoved = -1;
+        do {
+            if (cur == "dead")
+                firstRemoved = 1;
+            index--;
+            if (index < 0)
+                index += set.length;
+            cur = set[index];
+            if (cur == null) {
+                if (firstRemoved != -1)
+                    set[firstRemoved] = "dead";
+                else
+                    set[index] = key;
+                return;
+            }
+        } while (index != loopIndex);
+        if (firstRemoved != -1)
+            set[firstRemoved] = null;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jsr292/ConcurrentClassLoadingTest.java	Mon Sep 02 13:13:45 2013 +0200
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8022595
+ * @summary JSR292: deadlock during class loading of MethodHandles, MethodHandleImpl & MethodHandleNatives
+ *
+ * @run main/othervm ConcurrentClassLoadingTest
+ */
+import java.util.*;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+
+public class ConcurrentClassLoadingTest {
+    int numThreads = 0;
+    long seed = 0;
+    CyclicBarrier l;
+    Random rand;
+
+    public static void main(String[] args) throws Throwable {
+        ConcurrentClassLoadingTest test = new ConcurrentClassLoadingTest();
+        test.parseArgs(args);
+        test.run();
+    }
+
+    void parseArgs(String[] args) {
+        int i = 0;
+        while (i < args.length) {
+            String flag = args[i];
+            switch(flag) {
+                case "-seed":
+                    seed = Long.parseLong(args[++i]);
+                    break;
+                case "-numThreads":
+                    numThreads = Integer.parseInt(args[++i]);
+                    break;
+                default:
+                    throw new Error("Unknown flag: " + flag);
+            }
+            ++i;
+        }
+    }
+
+    void init() {
+        if (numThreads == 0) {
+            numThreads = Runtime.getRuntime().availableProcessors();
+        }
+
+        if (seed == 0) {
+            seed = (new Random()).nextLong();
+        }
+        rand = new Random(seed);
+
+        l = new CyclicBarrier(numThreads + 1);
+
+        System.out.printf("Threads: %d\n", numThreads);
+        System.out.printf("Seed: %d\n", seed);
+    }
+
+    final List<Loader> loaders = new ArrayList<>();
+
+    void prepare() {
+        List<String> c = new ArrayList<>(Arrays.asList(classNames));
+
+        // Split classes between loading threads
+        int count = (classNames.length / numThreads) + 1;
+        for (int t = 0; t < numThreads; t++) {
+            List<String> sel = new ArrayList<>();
+
+            System.out.printf("Thread #%d:\n", t);
+            for (int i = 0; i < count; i++) {
+                if (c.size() == 0) break;
+
+                int k = rand.nextInt(c.size());
+                String elem = c.remove(k);
+                sel.add(elem);
+                System.out.printf("\t%s\n", elem);
+            }
+            loaders.add(new Loader(sel));
+        }
+
+        // Print diagnostic info when the test hangs
+        Runtime.getRuntime().addShutdownHook(new Thread() {
+            public void run() {
+                boolean alive = false;
+                for (Loader l : loaders) {
+                    if (!l.isAlive())  continue;
+
+                    if (!alive) {
+                        System.out.println("Some threads are still alive:");
+                        alive = true;
+                    }
+
+                    System.out.println(l.getName());
+                    for (StackTraceElement elem : l.getStackTrace()) {
+                        System.out.println("\t"+elem.toString());
+                    }
+                }
+            }
+        });
+    }
+
+    public void run() throws Throwable {
+        init();
+        prepare();
+
+        for (Loader loader : loaders) {
+            loader.start();
+        }
+
+        l.await();
+
+        for (Loader loader : loaders) {
+            loader.join();
+        }
+    }
+
+    class Loader extends Thread {
+        List<String> classes;
+
+        public Loader(List<String> classes) {
+            this.classes = classes;
+            setDaemon(true);
+        }
+
+        @Override
+        public void run() {
+            try {
+                l.await();
+
+                for (String name : classes) {
+                    Class.forName(name).getName();
+                }
+            } catch (ClassNotFoundException | BrokenBarrierException | InterruptedException e) {
+                throw new Error(e);
+            }
+        }
+    }
+
+    final static String[] classNames = {
+            "java.lang.invoke.AbstractValidatingLambdaMetafactory",
+            "java.lang.invoke.BoundMethodHandle",
+            "java.lang.invoke.CallSite",
+            "java.lang.invoke.ConstantCallSite",
+            "java.lang.invoke.DirectMethodHandle",
+            "java.lang.invoke.InnerClassLambdaMetafactory",
+            "java.lang.invoke.InvokeDynamic",
+            "java.lang.invoke.InvokeGeneric",
+            "java.lang.invoke.InvokerBytecodeGenerator",
+            "java.lang.invoke.Invokers",
+            "java.lang.invoke.LambdaConversionException",
+            "java.lang.invoke.LambdaForm",
+            "java.lang.invoke.LambdaMetafactory",
+            "java.lang.invoke.MagicLambdaImpl",
+            "java.lang.invoke.MemberName",
+            "java.lang.invoke.MethodHandle",
+            "java.lang.invoke.MethodHandleImpl",
+            "java.lang.invoke.MethodHandleInfo",
+            "java.lang.invoke.MethodHandleNatives",
+            "java.lang.invoke.MethodHandleProxies",
+            "java.lang.invoke.MethodHandles",
+            "java.lang.invoke.MethodHandleStatics",
+            "java.lang.invoke.MethodType",
+            "java.lang.invoke.MethodTypeForm",
+            "java.lang.invoke.MutableCallSite",
+            "java.lang.invoke.SerializedLambda",
+            "java.lang.invoke.SimpleMethodHandle",
+            "java.lang.invoke.SwitchPoint",
+            "java.lang.invoke.TypeConvertingMethodAdapter",
+            "java.lang.invoke.VolatileCallSite",
+            "java.lang.invoke.WrongMethodTypeException"
+    };
+}