Merge
author amurillo
Fri, 06 Sep 2013 11:04:00 -0700
changeset 19733 20a700f38686
parent 19677 b412c9d17267 (current diff)
parent 19732 88f375dd7d65 (diff)
child 19734 c86a71fcceaf
Merge
hotspot/src/share/vm/classfile/genericSignatures.cpp
hotspot/src/share/vm/classfile/genericSignatures.hpp
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Fri Sep 06 11:04:00 2013 -0700
@@ -354,9 +354,16 @@
   public boolean   getIsMarkedDependent()   { return                isMarkedDependent.getValue(this) != 0; }
   public long      getVtableLen()           { return                vtableLen.getValue(this); }
   public long      getItableLen()           { return                itableLen.getValue(this); }
-  public Symbol    getGenericSignature()    { return                getConstants().getSymbolAt(genericSignatureIndex.getValue(this)); }
   public long      majorVersion()           { return                majorVersion.getValue(this); }
   public long      minorVersion()           { return                minorVersion.getValue(this); }
+  public Symbol    getGenericSignature()    {
+    long index = genericSignatureIndex.getValue(this);
+    if (index != 0) {
+      return getConstants().getSymbolAt(index);
+    } else {
+      return null;
+    }
+  }
 
   // "size helper" == instance size in words
   public long getSizeHelper() {
--- a/hotspot/make/bsd/makefiles/gcc.make	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/make/bsd/makefiles/gcc.make	Fri Sep 06 11:04:00 2013 -0700
@@ -129,16 +129,21 @@
   
     # We only use precompiled headers for the JVM build
     CFLAGS += $(VM_PCH_FLAG)
-  
-    # There are some files which don't like precompiled headers
-    # The following files are build with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
-    # But Clang doesn't support a precompiled header which was compiled with -O3
-    # to be used in a compilation unit which uses '-O0'. We could also prepare an
-    # extra '-O0' PCH file for the opt build and use it here, but it's probably
-    # not worth the effort as long as only two files need this special handling.
+ 
+    # The following files are compiled at various optimization
+    # levels due to optimization issues encountered at the
+    # 'OPT_CFLAGS_DEFAULT' level. The Clang compiler issues a
+    # compile-time error if there is an optimization-level skew
+    # between the PCH file and the C++ file, especially if the
+    # PCH file was compiled at a higher optimization level than
+    # the C++ file. One solution might be to prepare extra
+    # optimization-level-specific PCH files for the opt build and
+    # use them here, but it's probably not worth the effort as
+    # long as only a few files need this special handling.
     PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
     PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
     PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
+    PCH_FLAG/unsafe.o = $(PCH_FLAG/NO_PCH)
   
   endif
 else # ($(USE_CLANG), true)
@@ -306,6 +311,7 @@
 ifeq ($(USE_CLANG), true)
   ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
     OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
+    OPT_CFLAGS/unsafe.o += -O1
   endif
 else
   # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
--- a/hotspot/make/hotspot_version	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/make/hotspot_version	Fri Sep 06 11:04:00 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=48
+HS_BUILD_NUMBER=49
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/windows/create.bat	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/make/windows/create.bat	Fri Sep 06 11:04:00 2013 -0700
@@ -82,6 +82,7 @@
 
 echo **************************************************************
 set ProjectFile=%HotSpotBuildSpace%\jvm.vcproj
+echo MSC_VER = "%MSC_VER%" 
 if "%MSC_VER%" == "1200" (
 set ProjectFile=%HotSpotBuildSpace%\jvm.dsp
 echo Will generate VC6 project {unsupported}
@@ -96,11 +97,17 @@
 echo Will generate VC10 {Visual Studio 2010}
 set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
 ) else (
+if "%MSC_VER%" == "1700" (
+echo Will generate VC10 {compatible with Visual Studio 2012}
+echo After opening in VS 2012, click "Update" when prompted.
+set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
+) else (
 echo Will generate VC7 project {Visual Studio 2003 .NET}
 )
 )
 )
 )
+)
 echo %ProjectFile%
 echo **************************************************************
 
--- a/hotspot/make/windows/makefiles/rules.make	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/make/windows/makefiles/rules.make	Fri Sep 06 11:04:00 2013 -0700
@@ -69,6 +69,13 @@
 VcVersion=VC10
 ProjectFile=jvm.vcxproj
 
+!elseif "$(MSC_VER)" == "1700"
+# This is VS2012, but it loads VS10 projects just fine (and will
+# upgrade them automatically to VS2012 format).
+
+VcVersion=VC10
+ProjectFile=jvm.vcxproj
+
 !else
 
 VcVersion=VC7
--- a/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -307,7 +307,7 @@
       assert(a_byte == *start++, "should be the same code");
     }
 #endif
-  } else if (_id == load_mirror_id) {
+  } else if (_id == load_mirror_id || _id == load_appendix_id) {
     // produce a copy of the load mirror instruction for use by the being initialized case
 #ifdef ASSERT
     address start = __ pc();
@@ -384,6 +384,7 @@
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -397,7 +398,7 @@
   ce->add_call_info_here(_info);
   __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
   __ delayed()->nop();
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     address pc = (address)_pc_start;
     RelocIterator iter(cs, pc, pc + 1);
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -520,7 +520,7 @@
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
   // Allocate a new index in table to hold the object once it's been patched
   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
 
   AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -804,6 +804,12 @@
       }
       break;
 
+    case load_appendix_patching_id:
+      { __ set_info("load_appendix_patching", dont_gc_arguments);
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -402,6 +402,7 @@
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id:      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -419,7 +420,7 @@
   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
     __ nop();
   }
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
     relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -362,7 +362,7 @@
 
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
   jobject o = NULL;
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
   __ movoop(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1499,6 +1499,13 @@
       }
       break;
 
+    case load_appendix_patching_id:
+      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
+        // we should set up register map
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -2767,7 +2767,19 @@
   Linux::numa_interleave_memory(addr, bytes);
 }
 
+// Argument for numa_set_bind_policy(int). Passing 0 sets the
+// bind policy to MPOL_PREFERRED for the current thread.
+#define USE_MPOL_PREFERRED 0
+
 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
+  // To make NUMA and large pages more robust when both are enabled, we need to
+  // ease the requirements on where the memory should be allocated. MPOL_BIND is
+  // the default policy and it forces memory to be allocated on the specified
+  // node. Changing this to MPOL_PREFERRED makes the kernel prefer to allocate
+  // the memory on the specified node without forcing it. Using this policy
+  // prevents getting SIGBUS when trying to allocate large pages on NUMA nodes
+  // with no free large pages.
+  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
 }
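
For reference, a minimal standalone sketch of the lazy libnuma binding this call relies on, under the assumption that libnuma.so.1 is installed; this is an illustration, not HotSpot code. Passing 0 to numa_set_bind_policy selects MPOL_PREFERRED, while non-zero selects strict MPOL_BIND:

    // Hypothetical sketch of a dlsym-based libnuma lookup (the real binding
    // is set up in os::Linux during libnuma initialization).
    #include <dlfcn.h>

    typedef void (*numa_set_bind_policy_func_t)(int policy);

    static numa_set_bind_policy_func_t lookup_numa_set_bind_policy() {
      void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
      if (handle == NULL) return NULL;  // libnuma not installed
      // dlsym returns NULL if the symbol is missing (older libnuma).
      return (numa_set_bind_policy_func_t)dlsym(handle, "numa_set_bind_policy");
    }

    // Usage: prefer, rather than force, the node chosen by numa_tonode_memory.
    //   numa_set_bind_policy_func_t fn = lookup_numa_set_bind_policy();
    //   if (fn != NULL) fn(0);  // 0 selects MPOL_PREFERRED
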
 
@@ -2869,6 +2881,8 @@
                                             libnuma_dlsym(handle, "numa_tonode_memory")));
       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                             libnuma_dlsym(handle, "numa_interleave_memory")));
+      set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
+                                            libnuma_dlsym(handle, "numa_set_bind_policy")));
 
 
       if (numa_available() != -1) {
@@ -2935,6 +2949,7 @@
 os::Linux::numa_available_func_t os::Linux::_numa_available;
 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
+os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
 unsigned long* os::Linux::_numa_all_nodes;
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
@@ -2943,6 +2958,53 @@
   return res  != (uintptr_t) MAP_FAILED;
 }
 
+static
+address get_stack_commited_bottom(address bottom, size_t size) {
+  address nbot = bottom;
+  address ntop = bottom + size;
+
+  size_t page_sz = os::vm_page_size();
+  unsigned pages = size / page_sz;
+
+  unsigned char vec[1];
+  unsigned imin = 1, imax = pages + 1, imid;
+  int mincore_return_value;
+
+  while (imin < imax) {
+    imid = (imax + imin) / 2;
+    nbot = ntop - (imid * page_sz);
+
+    // Use a trick with mincore to check whether the page is mapped or not.
+    // mincore sets vec to 1 if the page resides in memory and to 0 if the
+    // page is swapped out, but if the page we are asking about is unmapped
+    // it returns -1 with errno set to ENOMEM.
+    mincore_return_value = mincore(nbot, page_sz, vec);
+
+    if (mincore_return_value == -1) {
+      // Page is not mapped; go up
+      // to find the first mapped page
+      if (errno != EAGAIN) {
+        assert(errno == ENOMEM, "Unexpected mincore errno");
+        imax = imid;
+      }
+    } else {
+      // Page is mapped; go down
+      // to find the first unmapped page
+      imin = imid + 1;
+    }
+  }
+
+  nbot = nbot + page_sz;
+
+  // Adjust the stack bottom one page up if the last checked page is not mapped
+  if (mincore_return_value == -1) {
+    nbot = nbot + page_sz;
+  }
+
+  return nbot;
+}
+
+
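
As context for the binary search above, a minimal sketch of the mincore trick, assuming Linux and a page-aligned address; this is an illustration, not part of the patch. The syscall distinguishes "mapped but possibly swapped out" from "not mapped at all":

    #include <sys/mman.h>
    #include <cerrno>
    #include <cstddef>

    // Returns true if the page starting at addr is mapped in this process.
    // mincore() fails with errno == ENOMEM when the range is unmapped; on
    // success, vec[0] only reports residency (in core vs. swapped out), not
    // whether a mapping exists, so the error path carries the signal here.
    static bool page_is_mapped(void* addr, size_t page_sz) {
      unsigned char vec[1];
      if (mincore(addr, page_sz, vec) == -1) {
        return errno != ENOMEM;  // ENOMEM: unmapped; treat EAGAIN etc. as mapped
      }
      return true;
    }
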
 // Linux uses a growable mapping for the stack, and if the mapping for
 // the stack guard pages is not removed when we detach a thread the
 // stack cannot grow beyond the pages where the stack guard was
@@ -2957,59 +3019,37 @@
 // So, we need to know the extent of the stack mapping when
 // create_stack_guard_pages() is called.
 
-// Find the bounds of the stack mapping.  Return true for success.
-//
 // We only need this for stacks that are growable: at the time of
 // writing thread stacks don't use growable mappings (i.e. those
 // created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
 // only applies to the main thread.
 
-static
-bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) {
-
-  char buf[128];
-  int fd, sz;
-
-  if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) {
-    return false;
-  }
-
-  const char kw[] = "[stack]";
-  const int kwlen = sizeof(kw)-1;
-
-  // Address part of /proc/self/maps couldn't be more than 128 bytes
-  while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) {
-     if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) {
-        // Extract addresses
-        if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
-           uintptr_t sp = (uintptr_t) __builtin_frame_address(0);
-           if (sp >= *bottom && sp <= *top) {
-              ::close(fd);
-              return true;
-           }
-        }
-     }
-  }
-
- ::close(fd);
-  return false;
-}
-
-
 // If the (growable) stack mapping already extends beyond the point
 // where we're going to put our guard pages, truncate the mapping at
 // that point by munmap()ping it.  This ensures that when we later
 // munmap() the guard pages we don't leave a hole in the stack
-// mapping. This only affects the main/initial thread, but guard
-// against future OS changes
+// mapping. This only affects the main/initial thread.
+
 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-  uintptr_t stack_extent, stack_base;
-  bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
-  if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
-      assert(os::Linux::is_initial_thread(),
-           "growable stack in non-initial thread");
-    if (stack_extent < (uintptr_t)addr)
-      ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
+
+  if (os::Linux::is_initial_thread()) {
+    // As we manually grow the stack up to its bottom inside create_attached_thread(),
+    // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
+    // we don't need to do anything special.
+    // Check that first, before calling the heavier fallback.
+    uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
+    unsigned char vec[1];
+
+    if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
+      // Fall back to the slow path on all errors, including EAGAIN
+      stack_extent = (uintptr_t) get_stack_commited_bottom(
+                                    os::Linux::initial_thread_stack_bottom(),
+                                    (size_t)addr - stack_extent);
+    }
+
+    if (stack_extent < (uintptr_t)addr) {
+      ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
+    }
   }
 
   return os::commit_memory(addr, size, !ExecMem);
@@ -3018,13 +3058,13 @@
 // If this is a growable mapping, remove the guard pages entirely by
 // munmap()ping them.  If not, just call uncommit_memory(). This only
 // affects the main/initial thread, but guard against future OS changes
+// It's safe to always unmap the guard pages for the initial thread because we
+// always place them right after the end of the mapped region.
+
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
   uintptr_t stack_extent, stack_base;
-  bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
-  if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
-      assert(os::Linux::is_initial_thread(),
-           "growable stack in non-initial thread");
-
+
+  if (os::Linux::is_initial_thread()) {
     return ::munmap(addr, size) == 0;
   }
 
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -235,6 +235,7 @@
   typedef int (*numa_available_func_t)(void);
   typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
+  typedef void (*numa_set_bind_policy_func_t)(int policy);
 
   static sched_getcpu_func_t _sched_getcpu;
   static numa_node_to_cpus_func_t _numa_node_to_cpus;
@@ -242,6 +243,7 @@
   static numa_available_func_t _numa_available;
   static numa_tonode_memory_func_t _numa_tonode_memory;
   static numa_interleave_memory_func_t _numa_interleave_memory;
+  static numa_set_bind_policy_func_t _numa_set_bind_policy;
   static unsigned long* _numa_all_nodes;
 
   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
@@ -250,6 +252,7 @@
   static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
+  static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static int sched_getcpu_syscall(void);
 public:
@@ -267,6 +270,11 @@
       _numa_interleave_memory(start, size, _numa_all_nodes);
     }
   }
+  static void numa_set_bind_policy(int policy) {
+    if (_numa_set_bind_policy != NULL) {
+      _numa_set_bind_policy(policy);
+    }
+  }
   static int get_node_by_cpu(int cpu_id);
 };
 
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -30,6 +30,8 @@
 #include <unistd.h>
 #include <sys/resource.h>
 #include <sys/utsname.h>
+#include <pthread.h>
+#include <signal.h>
 
 
 // Check core dump limit and report possible place where core can be found
@@ -320,11 +322,17 @@
  * The callback is supposed to provide the method that should be protected.
  */
 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
+  sigset_t saved_sig_mask;
+
   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
       "crash_protection already set?");
 
-  if (sigsetjmp(_jmpbuf, 1) == 0) {
+  // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
+  // since on at least some systems (OS X) siglongjmp will restore the mask
+  // for the process, not the thread
+  pthread_sigmask(0, NULL, &saved_sig_mask);
+  if (sigsetjmp(_jmpbuf, 0) == 0) {
     // make sure we can see in the signal handler that we have crash protection
     // installed
     WatcherThread::watcher_thread()->set_crash_protection(this);
@@ -334,6 +342,7 @@
     return true;
   }
   // this happens when we siglongjmp() back
+  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
   WatcherThread::watcher_thread()->set_crash_protection(NULL);
   return false;
 }
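
A minimal sketch of the save/restore pattern introduced above; an illustration, not the HotSpot code, and do_protected_work is a hypothetical callback. sigsetjmp is now called with savemask = 0, and the mask is saved and restored per thread through pthread_sigmask, because siglongjmp's own mask restore is process-wide on some systems:

    #include <setjmp.h>
    #include <pthread.h>
    #include <signal.h>

    static sigjmp_buf jmpbuf;

    void call_with_crash_protection(void (*do_protected_work)()) {
      sigset_t saved_sig_mask;
      // With a NULL 'set' argument, pthread_sigmask only queries the mask.
      pthread_sigmask(0, NULL, &saved_sig_mask);
      if (sigsetjmp(jmpbuf, 0) == 0) {  // savemask = 0: don't rely on siglongjmp
        do_protected_work();            // a crash handler may siglongjmp(jmpbuf, 1)
      } else {
        // Reached via siglongjmp: restore this thread's mask ourselves.
        pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
      }
    }
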
--- a/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -44,6 +44,6 @@
 define_pd_global(intx, CompilerThreadStackSize,  0);
 
 // Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx,HeapBaseMinAddress,       256*M);
+define_pd_global(uintx,HeapBaseMinAddress,       2*G);
 
 #endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP
--- a/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Fri Sep 06 11:04:00 2013 -0700
@@ -106,10 +106,12 @@
                         " (" + getMethod().getBytes() + " bytes) " + getReason());
             }
         }
+        stream.printf(" (end time: %6.4f", getTimeStamp());
         if (getEndNodes() > 0) {
-            stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
+            stream.printf(" nodes: %d live: %d", getEndNodes(), getEndLiveNodes());
         }
-        stream.println("");
+        stream.println(")");
+
         if (getReceiver() != null) {
             emit(stream, indent + 4);
             //                 stream.println("type profile " + method.holder + " -> " + receiver + " (" +
--- a/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Fri Sep 06 11:04:00 2013 -0700
@@ -207,7 +207,12 @@
     }
 
     String search(Attributes attr, String name) {
-        return search(attr, name, null);
+        String result = attr.getValue(name);
+        if (result != null) {
+            return result;
+        } else {
+            throw new InternalError("can't find " + name);
+        }
     }
 
     String search(Attributes attr, String name, String defaultValue) {
@@ -215,13 +220,7 @@
         if (result != null) {
             return result;
         }
-        if (defaultValue != null) {
-            return defaultValue;
-        }
-        for (int i = 0; i < attr.getLength(); i++) {
-            System.out.println(attr.getQName(i) + " " + attr.getValue(attr.getQName(i)));
-        }
-        throw new InternalError("can't find " + name);
+        return defaultValue;
     }
     int indent = 0;
 
@@ -268,17 +267,18 @@
             Phase p = new Phase(search(atts, "name"),
                     Double.parseDouble(search(atts, "stamp")),
                     Integer.parseInt(search(atts, "nodes", "0")),
-                    Integer.parseInt(search(atts, "live")));
+                    Integer.parseInt(search(atts, "live", "0")));
             phaseStack.push(p);
         } else if (qname.equals("phase_done")) {
             Phase p = phaseStack.pop();
-            if (! p.getId().equals(search(atts, "name"))) {
+            String phaseName = search(atts, "name", null);
+            if (phaseName != null && !p.getId().equals(phaseName)) {
                 System.out.println("phase: " + p.getId());
                 throw new InternalError("phase name mismatch");
             }
             p.setEnd(Double.parseDouble(search(atts, "stamp")));
             p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
-            p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
+            p.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
             compile.getPhases().add(p);
         } else if (qname.equals("task")) {
             compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
@@ -413,8 +413,8 @@
             }
         } else if (qname.equals("parse_done")) {
             CallSite call = scopes.pop();
-            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
-            call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
+            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
+            call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
             call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
             scopes.push(call);
         }
--- a/hotspot/src/share/vm/adlc/arena.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/adlc/arena.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 
 #include "adlc.hpp"
 
-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new(size_t requested_size, size_t length) throw() {
   return CHeapObj::operator new(requested_size + length);
 }
 
@@ -163,7 +163,7 @@
 //-----------------------------------------------------------------------------
 // CHeapObj
 
-void* CHeapObj::operator new(size_t size){
+void* CHeapObj::operator new(size_t size) throw() {
   return (void *) malloc(size);
 }
 
--- a/hotspot/src/share/vm/adlc/arena.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/adlc/arena.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 
 class CHeapObj {
  public:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void  operator delete(void* p);
   void* new_array(size_t size);
 };
@@ -53,7 +53,7 @@
 
 class ValueObj {
  public:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void operator delete(void* p);
 };
 
@@ -61,7 +61,7 @@
 
 class AllStatic {
  public:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void operator delete(void* p);
 };
 
@@ -70,7 +70,7 @@
 // Linked list of raw memory chunks
 class Chunk: public CHeapObj {
  public:
-  void* operator new(size_t size, size_t length);
+  void* operator new(size_t size, size_t length) throw();
   void  operator delete(void* p, size_t length);
   Chunk(size_t length);
 
--- a/hotspot/src/share/vm/adlc/main.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/adlc/main.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -485,7 +485,7 @@
 
 // VS2005 has its own definition, identical to this one.
 #if !defined(_WIN32) || defined(_WIN64) || _MSC_VER < 1400
-void *operator new( size_t size, int, const char *, int ) {
+void *operator new( size_t size, int, const char *, int ) throw() {
   return ::operator new( size );
 }
 #endif
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1095,7 +1095,7 @@
         fprintf(fp, "  // Identify previous instruction if inside this block\n");
         fprintf(fp, "  if( ");
         print_block_index(fp, inst_position);
-        fprintf(fp, " > 0 ) {\n    Node *n = block->_nodes.at(");
+        fprintf(fp, " > 0 ) {\n    Node *n = block->get_node(");
         print_block_index(fp, inst_position);
         fprintf(fp, ");\n    inst%d = (n->is_Mach()) ? ", inst_position);
         fprintf(fp, "n->as_Mach() : NULL;\n  }\n");
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -296,8 +296,8 @@
   // CodeBuffers must be allocated on the stack except for a single
   // special case during expansion which is handled internally.  This
   // is done to guarantee proper cleanup of resources.
-  void* operator new(size_t size) { return ResourceObj::operator new(size); }
-  void  operator delete(void* p)  { ShouldNotCallThis(); }
+  void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
+  void  operator delete(void* p)          { ShouldNotCallThis(); }
 
  public:
   typedef int csize_t;  // code size type; would be size_t except for history
--- a/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -364,7 +364,8 @@
   enum PatchID {
     access_field_id,
     load_klass_id,
-    load_mirror_id
+    load_mirror_id,
+    load_appendix_id
   };
   enum constants {
     patch_info_size = 3
@@ -417,7 +418,7 @@
       }
       NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
       n_move->set_offset(field_offset);
-    } else if (_id == load_klass_id || _id == load_mirror_id) {
+    } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
       assert(_obj != noreg, "must have register object for load_klass/load_mirror");
 #ifdef ASSERT
       // verify that we're pointing at a NativeMovConstReg
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -74,16 +74,19 @@
  private:
   JavaThread* _thread;
   CompileLog* _log;
+  TimerName _timer;
 
  public:
   PhaseTraceTime(TimerName timer)
-  : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
+  : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose),
+    _log(NULL), _timer(timer)
+  {
     if (Compilation::current() != NULL) {
       _log = Compilation::current()->log();
     }
 
     if (_log != NULL) {
-      _log->begin_head("phase name='%s'", timer_name[timer]);
+      _log->begin_head("phase name='%s'", timer_name[_timer]);
       _log->stamp();
       _log->end_head();
     }
@@ -91,7 +94,7 @@
 
   ~PhaseTraceTime() {
     if (_log != NULL)
-      _log->done("phase");
+      _log->done("phase name='%s'", timer_name[_timer]);
   }
 };
 
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -279,8 +279,8 @@
 // Base class for objects allocated by the compiler in the compilation arena
 class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); }
-  void* operator new(size_t size, Arena* arena) {
+  void* operator new(size_t size) throw() { return Compilation::current()->arena()->Amalloc(size); }
+  void* operator new(size_t size, Arena* arena) throw() {
     return arena->Amalloc(size);
   }
   void  operator delete(void* p) {} // nothing to do
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1583,7 +1583,7 @@
       ObjectType* obj_type = obj->type()->as_ObjectType();
       if (obj_type->is_constant() && !PatchALot) {
         ciObject* const_oop = obj_type->constant_value();
-        if (!const_oop->is_null_object()) {
+        if (!const_oop->is_null_object() && const_oop->is_loaded()) {
           if (field->is_constant()) {
             ciConstant field_val = field->constant_value_of(const_oop);
             BasicType field_type = field_val.basic_type();
@@ -1667,9 +1667,8 @@
   const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
   assert(declared_signature != NULL, "cannot be null");
 
-  // FIXME bail out for now
-  if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
-    BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
+  if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
+    BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
   }
 
   // we have to make sure the argument size (incl. the receiver)
@@ -1713,10 +1712,23 @@
       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
       break;
     }
+  } else {
+    if (bc_raw == Bytecodes::_invokehandle) {
+      assert(!will_link, "should come here only for unlinked call");
+      code = Bytecodes::_invokespecial;
+    }
   }
 
   // Push appendix argument (MethodType, CallSite, etc.), if one.
-  if (stream()->has_appendix()) {
+  bool patch_for_appendix = false;
+  int patching_appendix_arg = 0;
+  if (C1PatchInvokeDynamic &&
+      (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
+    Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
+    apush(arg);
+    patch_for_appendix = true;
+    patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
+  } else if (stream()->has_appendix()) {
     ciObject* appendix = stream()->get_appendix();
     Value arg = append(new Constant(new ObjectConstant(appendix)));
     apush(arg);
@@ -1732,7 +1744,8 @@
   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
       !(// %%% FIXME: Are both of these relevant?
         target->is_method_handle_intrinsic() ||
-        target->is_compiled_lambda_form())) {
+        target->is_compiled_lambda_form()) &&
+      !patch_for_appendix) {
     Value receiver = NULL;
     ciInstanceKlass* receiver_klass = NULL;
     bool type_is_exact = false;
@@ -1850,7 +1863,8 @@
   // check if we could do inlining
   if (!PatchALot && Inline && klass->is_loaded() &&
       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
-      && target->is_loaded()) {
+      && target->is_loaded()
+      && !patch_for_appendix) {
     // callee is known => check if we have static binding
     assert(target->is_loaded(), "callee must be known");
     if (code == Bytecodes::_invokestatic  ||
@@ -1901,7 +1915,7 @@
     code == Bytecodes::_invokespecial   ||
     code == Bytecodes::_invokevirtual   ||
     code == Bytecodes::_invokeinterface;
-  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+  Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
   Value recv = has_receiver ? apop() : NULL;
   int vtable_index = Method::invalid_vtable_index;
 
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -323,7 +323,7 @@
   }
 
  public:
-  void* operator new(size_t size) {
+  void* operator new(size_t size) throw() {
     Compilation* c = Compilation::current();
     void* res = c->arena()->Amalloc(size);
     ((Instruction*)res)->_id = c->get_next_id();
@@ -1611,7 +1611,7 @@
   friend class SuxAndWeightAdjuster;
 
  public:
-   void* operator new(size_t size) {
+   void* operator new(size_t size) throw() {
     Compilation* c = Compilation::current();
     void* res = c->arena()->Amalloc(size);
     ((BlockBegin*)res)->_id = c->get_next_id();
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1211,8 +1211,6 @@
   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
   bool is_method_handle_invoke() const {
     return
-      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
-      ||
       method()->is_compiled_lambda_form()  // Java-generated adapter
       ||
       method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -93,12 +93,23 @@
       default:
         ShouldNotReachHere();
     }
+  } else if (patch->id() == PatchingStub::load_appendix_id) {
+    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
+    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
   } else {
     ShouldNotReachHere();
   }
 #endif
 }
 
+PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
+  IRScope* scope = info->scope();
+  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
+  if (Bytecodes::has_optional_appendix(bc_raw)) {
+    return PatchingStub::load_appendix_id;
+  }
+  return PatchingStub::load_mirror_id;
+}
 
 //---------------------------------------------------------------
 
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -119,6 +119,8 @@
 
   void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
 
+  PatchingStub::PatchID patching_id(CodeEmitInfo* info);
+
  public:
   LIR_Assembler(Compilation* c);
   ~LIR_Assembler();
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -819,6 +819,7 @@
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
+  Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 
@@ -888,10 +889,32 @@
           mirror = Handle(THREAD, m);
         }
         break;
-      default: Unimplemented();
+      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
     }
     // convert to handle
     load_klass = KlassHandle(THREAD, k);
+  } else if (stub_id == load_appendix_patching_id) {
+    Bytecode_invoke bytecode(caller_method, bci);
+    Bytecodes::Code bc = bytecode.invoke_code();
+
+    CallInfo info;
+    constantPoolHandle pool(thread, caller_method->constants());
+    int index = bytecode.index();
+    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+    appendix = info.resolved_appendix();
+    switch (bc) {
+      case Bytecodes::_invokehandle: {
+        int cache_index = ConstantPool::decode_cpcache_index(index, true);
+        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+        break;
+      }
+      case Bytecodes::_invokedynamic: {
+        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+        break;
+      }
+      default: fatal("unexpected bytecode for load_appendix_patching_id");
+    }
   } else {
     ShouldNotReachHere();
   }
@@ -992,8 +1015,8 @@
                    n_copy->data() == (intptr_t)Universe::non_oop_word(),
                    "illegal init value");
             if (stub_id == Runtime1::load_klass_patching_id) {
-            assert(load_klass() != NULL, "klass not set");
-            n_copy->set_data((intx) (load_klass()));
+              assert(load_klass() != NULL, "klass not set");
+              n_copy->set_data((intx) (load_klass()));
             } else {
               assert(mirror() != NULL, "klass not set");
               n_copy->set_data((intx) (mirror()));
@@ -1002,43 +1025,55 @@
             if (TracePatching) {
               Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
             }
+          }
+        } else if (stub_id == Runtime1::load_appendix_patching_id) {
+          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+          assert(n_copy->data() == 0 ||
+                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
+                 "illegal init value");
+          n_copy->set_data((intx) (appendix()));
 
-#if defined(SPARC) || defined(PPC)
-            // Update the location in the nmethod with the proper
-            // metadata.  When the code was generated, a NULL was stuffed
-            // in the metadata table and that table needs to be update to
-            // have the right value.  On intel the value is kept
-            // directly in the instruction instead of in the metadata
-            // table, so set_data above effectively updated the value.
-            nmethod* nm = CodeCache::find_nmethod(instr_pc);
-            assert(nm != NULL, "invalid nmethod_pc");
-            RelocIterator mds(nm, copy_buff, copy_buff + 1);
-            bool found = false;
-            while (mds.next() && !found) {
-              if (mds.type() == relocInfo::oop_type) {
-                assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-                oop_Relocation* r = mds.oop_reloc();
-                oop* oop_adr = r->oop_addr();
-                *oop_adr = mirror();
-                r->fix_oop_relocation();
-                found = true;
-              } else if (mds.type() == relocInfo::metadata_type) {
-                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-                metadata_Relocation* r = mds.metadata_reloc();
-                Metadata** metadata_adr = r->metadata_addr();
-                *metadata_adr = load_klass();
-                r->fix_metadata_relocation();
-                found = true;
-              }
-            }
-            assert(found, "the metadata must exist!");
-#endif
-
+          if (TracePatching) {
+            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
           }
         } else {
           ShouldNotReachHere();
         }
 
+#if defined(SPARC) || defined(PPC)
+        if (load_klass_or_mirror_patch_id ||
+            stub_id == Runtime1::load_appendix_patching_id) {
+          // Update the location in the nmethod with the proper
+          // metadata.  When the code was generated, a NULL was stuffed
+          // in the metadata table and that table needs to be updated to
+          // have the right value.  On Intel the value is kept
+          // directly in the instruction instead of in the metadata
+          // table, so set_data above effectively updated the value.
+          nmethod* nm = CodeCache::find_nmethod(instr_pc);
+          assert(nm != NULL, "invalid nmethod_pc");
+          RelocIterator mds(nm, copy_buff, copy_buff + 1);
+          bool found = false;
+          while (mds.next() && !found) {
+            if (mds.type() == relocInfo::oop_type) {
+              assert(stub_id == Runtime1::load_mirror_patching_id ||
+                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+              oop_Relocation* r = mds.oop_reloc();
+              oop* oop_adr = r->oop_addr();
+              *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+              r->fix_oop_relocation();
+              found = true;
+            } else if (mds.type() == relocInfo::metadata_type) {
+              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+              metadata_Relocation* r = mds.metadata_reloc();
+              Metadata** metadata_adr = r->metadata_addr();
+              *metadata_adr = load_klass();
+              r->fix_metadata_relocation();
+              found = true;
+            }
+          }
+          assert(found, "the metadata must exist!");
+        }
+#endif
         if (do_patch) {
           // replace instructions
           // first replace the tail, then the call
@@ -1077,7 +1112,8 @@
           ICache::invalidate_range(instr_pc, *byte_count);
           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
 
-          if (load_klass_or_mirror_patch_id) {
+          if (load_klass_or_mirror_patch_id ||
+              stub_id == Runtime1::load_appendix_patching_id) {
             relocInfo::relocType rtype =
               (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
@@ -1118,7 +1154,8 @@
 
   // If we are patching in a non-perm oop, make sure the nmethod
   // is on the right list.
-  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+                              (appendix.not_null() && appendix->is_scavengable()))) {
     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
@@ -1179,6 +1216,24 @@
   return caller_is_deopted();
 }
 
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+  Thread* THREAD = thread;
+  debug_only(NoHandleMark nhm;)
+  {
+    // Enter VM mode
+
+    ResetNoHandleMark rnhm;
+    patch_code(thread, load_appendix_patching_id);
+  }
+  // Back in Java; use no oops, DON'T safepoint
+
+  // Return true if calling code is deoptimized
+
+  return caller_is_deopted();
+}
 //
 // Entry point for compiled code. We want to patch a nmethod.
 // We don't do a normal VM transition here because we want to
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -67,6 +67,7 @@
   stub(access_field_patching)        \
   stub(load_klass_patching)          \
   stub(load_mirror_patching)         \
+  stub(load_appendix_patching)       \
   stub(g1_pre_barrier_slow)          \
   stub(g1_post_barrier_slow)         \
   stub(fpu2long_stub)                \
@@ -160,6 +161,7 @@
   static int access_field_patching(JavaThread* thread);
   static int move_klass_patching(JavaThread* thread);
   static int move_mirror_patching(JavaThread* thread);
+  static int move_appendix_patching(JavaThread* thread);
 
   static void patch_code(JavaThread* thread, StubID stub_id);
 
--- a/hotspot/src/share/vm/c1/c1_globals.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_globals.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -25,4 +25,4 @@
 #include "precompiled.hpp"
 #include "c1/c1_globals.hpp"
 
-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- a/hotspot/src/share/vm/c1/c1_globals.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -54,7 +54,7 @@
 //
 // Defines all global flags used by the client compiler.
 //
-#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
                                                                             \
   /* Printing */                                                            \
   notproduct(bool, PrintC1Statistics, false,                                \
@@ -333,15 +333,19 @@
           "Use CHA and exact type results at call sites when updating MDOs")\
                                                                             \
   product(bool, C1UpdateMethodData, trueInTiered,                           \
-          "Update MethodData*s in Tier1-generated code")                  \
+          "Update MethodData*s in Tier1-generated code")                    \
                                                                             \
   develop(bool, PrintCFGToFile, false,                                      \
           "print control flow graph to a separate file during compilation") \
                                                                             \
+  diagnostic(bool, C1PatchInvokeDynamic, true,                              \
+             "Patch invokedynamic appendix not known at compile time")      \
+                                                                            \
+                                                                            \
 
 
 // Read default values for c1 globals
 
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
 
 #endif // SHARE_VM_C1_C1_GLOBALS_HPP
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1150,6 +1150,10 @@
   record_method_not_compilable("out of memory");
 }
 
+ciInstance* ciEnv::unloaded_ciinstance() {
+  GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
+}
+
 void ciEnv::dump_replay_data(outputStream* out) {
   VM_ENTRY_MARK;
   MutexLocker ml(Compile_lock);
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -400,6 +400,7 @@
   static ciInstanceKlass* unloaded_ciinstance_klass() {
     return _unloaded_ciinstance_klass;
   }
+  ciInstance* unloaded_ciinstance();
 
   ciKlass*  find_system_klass(ciSymbol* klass_name);
   // Note:  To find a class from its name string, use ciSymbol::make,
--- a/hotspot/src/share/vm/ci/ciInstance.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciInstance.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -60,10 +60,10 @@
 //
 // Constant value of a field.
 ciConstant ciInstance::field_value(ciField* field) {
-  assert(is_loaded() &&
-         field->holder()->is_loaded() &&
-         klass()->is_subclass_of(field->holder()),
-         "invalid access");
+  assert(is_loaded(), "invalid access - must be loaded");
+  assert(field->holder()->is_loaded(), "invalid access - holder must be loaded");
+  assert(klass()->is_subclass_of(field->holder()), "invalid access - must be subclass");
+
   VM_ENTRY_MARK;
   ciConstant result;
   Handle obj = get_oop();
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -177,6 +177,10 @@
     address bcp = code() + bci;
     return Bytecodes::java_code_at(NULL, bcp);
   }
+  Bytecodes::Code raw_code_at_bci(int bci) {
+    address bcp = code() + bci;
+    return Bytecodes::code_at(NULL, bcp);
+  }
   BCEscapeAnalyzer  *get_bcea();
   ciMethodBlocks    *get_method_blocks();
 
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -563,7 +563,10 @@
   return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
 }
 
-
+ciInstance* ciObjectFactory::get_unloaded_object_constant() {
+  if (ciEnv::_Object_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
+}
 
 //------------------------------------------------------------------
 // ciObjectFactory::get_empty_methodData
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -131,6 +131,8 @@
   ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
 
 
+  ciInstance* get_unloaded_object_constant();
+
   // Get the ciMethodData representing the methodData for a method
   // with none.
   ciMethodData* get_empty_methodData();
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -28,7 +28,6 @@
 #include "classfile/classLoaderData.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/defaultMethods.hpp"
-#include "classfile/genericSignatures.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
@@ -3039,35 +3038,6 @@
   return annotations;
 }
 
-
-#ifdef ASSERT
-static void parseAndPrintGenericSignatures(
-    instanceKlassHandle this_klass, TRAPS) {
-  assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise");
-  ResourceMark rm;
-
-  if (this_klass->generic_signature() != NULL) {
-    using namespace generic;
-    ClassDescriptor* spec = ClassDescriptor::parse_generic_signature(this_klass(), CHECK);
-
-    tty->print_cr("Parsing %s", this_klass->generic_signature()->as_C_string());
-    spec->print_on(tty);
-
-    for (int i = 0; i < this_klass->methods()->length(); ++i) {
-      Method* m = this_klass->methods()->at(i);
-      MethodDescriptor* method_spec = MethodDescriptor::parse_generic_signature(m, spec);
-      Symbol* sig = m->generic_signature();
-      if (sig == NULL) {
-        sig = m->signature();
-      }
-      tty->print_cr("Parsing %s", sig->as_C_string());
-      method_spec->print_on(tty);
-    }
-  }
-}
-#endif // def ASSERT
-
-
 instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index,
                                                        TRAPS) {
   instanceKlassHandle super_klass;
@@ -4060,12 +4030,6 @@
     java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle));
 
 
-#ifdef ASSERT
-    if (ParseAllGenericSignatures) {
-      parseAndPrintGenericSignatures(this_klass, CHECK_(nullHandle));
-    }
-#endif
-
     // Generate any default methods - default methods are interface methods
     // that have a default implementation.  This is new with Lambda project.
     if (has_default_methods && !access_flags.is_interface() &&
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -197,7 +197,7 @@
 }
 
 
-ClassFileStream* ClassPathDirEntry::open_stream(const char* name) {
+ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
   // construct full path name
   char path[JVM_MAXPATHLEN];
   if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) {
@@ -240,7 +240,7 @@
   FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
 }
 
-ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
+ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
   // enable call to C land
   JavaThread* thread = JavaThread::current();
   ThreadToNativeFromVM ttn(thread);
@@ -284,24 +284,24 @@
   }
 }
 
-LazyClassPathEntry::LazyClassPathEntry(char* path, struct stat st) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
   _path = strdup(path);
-  _st = st;
+  _st = *st;
   _meta_index = NULL;
   _resolved_entry = NULL;
+  _has_error = false;
 }
 
 bool LazyClassPathEntry::is_jar_file() {
   return ((_st.st_mode & S_IFREG) == S_IFREG);
 }
 
-ClassPathEntry* LazyClassPathEntry::resolve_entry() {
+ClassPathEntry* LazyClassPathEntry::resolve_entry(TRAPS) {
   if (_resolved_entry != NULL) {
     return (ClassPathEntry*) _resolved_entry;
   }
   ClassPathEntry* new_entry = NULL;
-  ClassLoader::create_class_path_entry(_path, _st, &new_entry, false);
-  assert(new_entry != NULL, "earlier code should have caught this");
+  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
   {
     ThreadCritical tc;
     if (_resolved_entry == NULL) {
@@ -314,12 +314,21 @@
   return (ClassPathEntry*) _resolved_entry;
 }
 
-ClassFileStream* LazyClassPathEntry::open_stream(const char* name) {
+ClassFileStream* LazyClassPathEntry::open_stream(const char* name, TRAPS) {
   if (_meta_index != NULL &&
       !_meta_index->may_contain(name)) {
     return NULL;
   }
-  return resolve_entry()->open_stream(name);
+  if (_has_error) {
+    return NULL;
+  }
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  if (cpe == NULL) {
+    _has_error = true;
+    return NULL;
+  } else {
+    return cpe->open_stream(name, THREAD);
+  }
 }
 
 bool LazyClassPathEntry::is_lazy() {
@@ -465,20 +474,19 @@
   }
 }
 
-void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy) {
+ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
   JavaThread* thread = JavaThread::current();
   if (lazy) {
-    *new_entry = new LazyClassPathEntry(path, st);
-    return;
+    return new LazyClassPathEntry(path, st);
   }
-  if ((st.st_mode & S_IFREG) == S_IFREG) {
+  ClassPathEntry* new_entry = NULL;
+  if ((st->st_mode & S_IFREG) == S_IFREG) {
     // Regular file, should be a zip file
     // Canonicalized filename
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
       // This matches the classic VM
-      EXCEPTION_MARK;
-      THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");
+      THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
     }
     char* error_msg = NULL;
     jzfile* zip;
@@ -489,7 +497,7 @@
       zip = (*ZipOpen)(canonical_path, &error_msg);
     }
     if (zip != NULL && error_msg == NULL) {
-      *new_entry = new ClassPathZipEntry(zip, path);
+      new_entry = new ClassPathZipEntry(zip, path);
       if (TraceClassLoading) {
         tty->print_cr("[Opened %s]", path);
       }
@@ -504,16 +512,16 @@
         msg = NEW_RESOURCE_ARRAY(char, len);
         jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
       }
-      EXCEPTION_MARK;
-      THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);
+      THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
     }
   } else {
     // Directory
-    *new_entry = new ClassPathDirEntry(path);
+    new_entry = new ClassPathDirEntry(path);
     if (TraceClassLoading) {
       tty->print_cr("[Path %s]", path);
     }
   }
+  return new_entry;
 }
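
The create_class_path_entry() hunk above replaces the out-parameter-plus-EXCEPTION_MARK shape with a plain return value whose failures propagate to the caller through TRAPS and the CHECK macros. A rough standalone model of that calling convention, with the pending-exception state reduced to an explicit Err struct (an illustrative assumption, not the real TRAPS machinery):

// Rough model only: Err stands in for a pending exception on the thread,
// and the early return after checking it is what CHECK/CHECK_NULL expand to.
#include <cstdio>

struct Err { const char* msg = nullptr; };
struct PathEntry { const char* path; };

// New shape: the result is the return value; failure is signalled via *err.
PathEntry* create_entry(const char* path, bool ok, Err* err) {
  if (!ok) {                      // corresponds to THROW_MSG_(..., NULL)
    err->msg = "Bad pathname";
    return nullptr;
  }
  static PathEntry e;
  e.path = path;
  return &e;
}

PathEntry* update_entry_list(const char* path, Err* err) {
  PathEntry* e = create_entry(path, /*ok=*/true, err);
  if (err->msg != nullptr) return nullptr;  // the CHECK part of CHECK_NULL
  return e;                                 // safe to use: no error pending
}

int main() {
  Err err;
  if (PathEntry* e = update_entry_list("/classes", &err))
    std::printf("entry for %s\n", e->path);
}
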
 
 
@@ -572,13 +580,14 @@
   }
 }
 
-void ClassLoader::update_class_path_entry_list(const char *path,
+void ClassLoader::update_class_path_entry_list(char *path,
                                                bool check_for_duplicates) {
   struct stat st;
-  if (os::stat((char *)path, &st) == 0) {
+  if (os::stat(path, &st) == 0) {
     // File or directory found
     ClassPathEntry* new_entry = NULL;
-    create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader);
+    Thread* THREAD = Thread::current();
+    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
     // The kernel VM adds dynamically to the end of the classloader path and
     // doesn't reorder the bootclasspath which would break java.lang.Package
     // (see PackageInfo).
@@ -897,7 +906,7 @@
                                PerfClassTraceTime::CLASS_LOAD);
     ClassPathEntry* e = _first_entry;
     while (e != NULL) {
-      stream = e->open_stream(name);
+      stream = e->open_stream(name, CHECK_NULL);
       if (stream != NULL) {
         break;
       }
@@ -1257,11 +1266,16 @@
 }
 
 void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) {
-  resolve_entry()->compile_the_world(loader, CHECK);
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  if (cpe != NULL) {
+    cpe->compile_the_world(loader, CHECK);
+  }
 }
 
 bool LazyClassPathEntry::is_rt_jar() {
-  return resolve_entry()->is_rt_jar();
+  Thread* THREAD = Thread::current();
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  return (cpe != NULL) ? cpe->is_rt_jar() : false;
 }
 
 void ClassLoader::compile_the_world() {
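
LazyClassPathEntry::open_stream() above now latches a failed resolution in _has_error rather than re-attempting (and re-throwing) on every lookup. A self-contained sketch of that memoized success-or-failure pattern; the types are simplified and the ThreadCritical publication of _resolved_entry is deliberately omitted:

// Simplified types only; the real LazyClassPathEntry also serializes
// publication of _resolved_entry with ThreadCritical, omitted here.
#include <cstdio>

struct Entry { const char* data; };

class LazyEntry {
  Entry* _resolved = nullptr;
  bool _has_error = false;
  bool _resolvable;               // stand-in for "the path actually exists"

  Entry* resolve() {              // analogous to resolve_entry(TRAPS)
    if (_resolved != nullptr) return _resolved;   // already resolved
    if (!_resolvable) return nullptr;             // resolution failed
    static Entry e = { "classfile bytes" };
    _resolved = &e;
    return _resolved;
  }

 public:
  explicit LazyEntry(bool resolvable) : _resolvable(resolvable) {}

  const char* open(const char* name) {
    if (_has_error) return nullptr;   // never retry a known-bad entry
    Entry* e = resolve();
    if (e == nullptr) { _has_error = true; return nullptr; }
    std::printf("opening %s\n", name);
    return e->data;
  }
};

int main() {
  LazyEntry good(true), bad(false);
  good.open("java/lang/String.class");  // resolves once, caches the entry
  bad.open("Foo.class");                // fails and latches _has_error
  bad.open("Foo.class");                // short-circuits without retrying
}
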
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,7 @@
   ClassPathEntry();
   // Attempt to locate file_name through this class path entry.
   // Returns a class file parsing stream if successful.
-  virtual ClassFileStream* open_stream(const char* name) = 0;
+  virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0;
   // Debugging
   NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
   NOT_PRODUCT(virtual bool is_rt_jar() = 0;)
@@ -77,7 +77,7 @@
   bool is_jar_file()  { return false;  }
   const char* name()  { return _dir; }
   ClassPathDirEntry(char* dir);
-  ClassFileStream* open_stream(const char* name);
+  ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
   NOT_PRODUCT(bool is_rt_jar();)
@@ -107,7 +107,7 @@
   const char* name()  { return _zip_name; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
-  ClassFileStream* open_stream(const char* name);
+  ClassFileStream* open_stream(const char* name, TRAPS);
   void contents_do(void f(const char* name, void* context), void* context);
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
@@ -125,13 +125,14 @@
   char* _path; // dir or file
   struct stat _st;
   MetaIndex* _meta_index;
+  bool _has_error;
   volatile ClassPathEntry* _resolved_entry;
-  ClassPathEntry* resolve_entry();
+  ClassPathEntry* resolve_entry(TRAPS);
  public:
   bool is_jar_file();
   const char* name()  { return _path; }
-  LazyClassPathEntry(char* path, struct stat st);
-  ClassFileStream* open_stream(const char* name);
+  LazyClassPathEntry(char* path, const struct stat* st);
+  ClassFileStream* open_stream(const char* name, TRAPS);
   void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
   virtual bool is_lazy();
   // Debugging
@@ -207,14 +208,15 @@
   static void setup_meta_index();
   static void setup_bootstrap_search_path();
   static void load_zip_library();
-  static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy);
+  static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
+                                                 bool lazy, TRAPS);
 
   // Canonicalizes path names, so strcmp will work properly. This is mainly
   // to avoid confusing the zip library
   static bool get_canonical_path(char* orig, char* out, int len);
  public:
   // Used by the kernel jvm.
-  static void update_class_path_entry_list(const char *path,
+  static void update_class_path_entry_list(char *path,
                                            bool check_for_duplicates);
   static void print_bootclasspath();
 
--- a/hotspot/src/share/vm/classfile/defaultMethods.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/bytecodeAssembler.hpp"
 #include "classfile/defaultMethods.hpp"
-#include "classfile/genericSignatures.hpp"
 #include "classfile/symbolTable.hpp"
 #include "memory/allocation.hpp"
 #include "memory/metadataFactory.hpp"
@@ -75,14 +74,6 @@
   }
 };
 
-class ContextMark : public PseudoScopeMark {
- private:
-  generic::Context::Mark _mark;
- public:
-  ContextMark(const generic::Context::Mark& cm) : _mark(cm) {}
-  virtual void destroy() { _mark.destroy(); }
-};
-
 #ifndef PRODUCT
 static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
   ResourceMark rm;
@@ -503,38 +494,6 @@
   return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
 }
 
-// A generic method family contains a set of all methods that implement a single
-// language-level method.  Because of erasure, these methods may have different
-// signatures.  As members of the set are collected while walking over the
-// hierarchy, they are tagged with a qualification state.  The qualification
-// state for an erased method is set to disqualified if there exists a path
-// from the root of hierarchy to the method that contains an interleaving
-// language-equivalent method defined in an interface.
-class GenericMethodFamily : public MethodFamily {
- private:
-
-  generic::MethodDescriptor* _descriptor; // language-level description
-
- public:
-
-  GenericMethodFamily(generic::MethodDescriptor* canonical_desc)
-      : _descriptor(canonical_desc) {}
-
-  generic::MethodDescriptor* descriptor() const { return _descriptor; }
-
-  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
-    return descriptor()->covariant_match(md, ctx);
-  }
-
-#ifndef PRODUCT
-  Symbol* get_generic_sig() const {
-
-    generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
-    TempNewSymbol sig = descriptor()->reify_signature(&ctx, Thread::current());
-    return sig;
-  }
-#endif // ndef PRODUCT
-};
 
 class StateRestorer;
 
@@ -571,26 +530,6 @@
   StateRestorer* record_method_and_dq_further(Method* mo);
 };
 
-
-// StatefulGenericMethodFamily is a wrapper around GenericMethodFamily that maintains the
-// qualification state during hierarchy visitation, and applies that state
-// when adding members to the GenericMethodFamily.
-class StatefulGenericMethodFamily : public StatefulMethodFamily {
-
- public:
-  StatefulGenericMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx)
-  : StatefulMethodFamily(new GenericMethodFamily(md->canonicalize(ctx))) {
-
-  }
-  GenericMethodFamily* get_method_family() {
-    return (GenericMethodFamily*)_method_family;
-  }
-
-  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
-    return get_method_family()->descriptor_matches(md, ctx);
-  }
-};
-
 class StateRestorer : public PseudoScopeMark {
  private:
   StatefulMethodFamily* _method;
@@ -616,39 +555,6 @@
   return mark;
 }
 
-class StatefulGenericMethodFamilies : public ResourceObj {
- private:
-  GrowableArray<StatefulGenericMethodFamily*> _methods;
-
- public:
-  StatefulGenericMethodFamily* find_matching(
-      generic::MethodDescriptor* md, generic::Context* ctx) {
-    for (int i = 0; i < _methods.length(); ++i) {
-      StatefulGenericMethodFamily* existing = _methods.at(i);
-      if (existing->descriptor_matches(md, ctx)) {
-        return existing;
-      }
-    }
-    return NULL;
-  }
-
-  StatefulGenericMethodFamily* find_matching_or_create(
-      generic::MethodDescriptor* md, generic::Context* ctx) {
-    StatefulGenericMethodFamily* method = find_matching(md, ctx);
-    if (method == NULL) {
-      method = new StatefulGenericMethodFamily(md, ctx);
-      _methods.append(method);
-    }
-    return method;
-  }
-
-  void extract_families_into(GrowableArray<GenericMethodFamily*>* array) {
-    for (int i = 0; i < _methods.length(); ++i) {
-      array->append(_methods.at(i)->get_method_family());
-    }
-  }
-};
-
 // Represents a location corresponding to a vtable slot for methods that
 // neither the class nor any of its ancestors provides an implementation.
 // Default methods may be present to fill this slot.
@@ -779,146 +685,11 @@
 
 };
 
-// Iterates over the type hierarchy looking for all methods with a specific
-// method name.  The result of this is a set of method families each of
-// which is populated with a set of methods that implement the same
-// language-level signature.
-class FindMethodsByGenericSig : public HierarchyVisitor<FindMethodsByGenericSig> {
- private:
-  // Context data
-  Thread* THREAD;
-  generic::DescriptorCache* _cache;
-  Symbol* _method_name;
-  generic::Context* _ctx;
-  StatefulGenericMethodFamilies _families;
 
- public:
-
-  FindMethodsByGenericSig(generic::DescriptorCache* cache, Symbol* name,
-      generic::Context* ctx, Thread* thread) :
-    _cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {}
-
-  void get_discovered_families(GrowableArray<GenericMethodFamily*>* methods) {
-    _families.extract_families_into(methods);
-  }
-
-  void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
-  void free_node_data(void* node_data) {
-    PseudoScope::cast(node_data)->destroy();
-  }
-
-  bool visit() {
-    PseudoScope* scope = PseudoScope::cast(current_data());
-    InstanceKlass* klass = current_class();
-    InstanceKlass* sub = current_depth() > 0 ? class_at_depth(1) : NULL;
-
-    ContextMark* cm = new ContextMark(_ctx->mark());
-    scope->add_mark(cm); // will restore context when scope is freed
-
-    _ctx->apply_type_arguments(sub, klass, THREAD);
-
-    int start, end = 0;
-    start = klass->find_method_by_name(_method_name, &end);
-    if (start != -1) {
-      for (int i = start; i < end; ++i) {
-        Method* m = klass->methods()->at(i);
-        // This gets the method's parameter list with its generic type
-        // parameters resolved
-        generic::MethodDescriptor* md = _cache->descriptor_for(m, THREAD);
-
-        // Find all methods on this hierarchy that match this method
-        // (name, signature).   This class collects other families of this
-        // method name.
-        StatefulGenericMethodFamily* family =
-            _families.find_matching_or_create(md, _ctx);
-
-        if (klass->is_interface()) {
-          // ???
-          StateRestorer* restorer = family->record_method_and_dq_further(m);
-          scope->add_mark(restorer);
-        } else {
-          // This is the rule that methods in classes "win" (bad word) over
-          // methods in interfaces.  This works because of single inheritance
-          family->set_target_if_empty(m);
-        }
-      }
-    }
-    return true;
-  }
-};
-
-#ifndef PRODUCT
-static void print_generic_families(
-    GrowableArray<GenericMethodFamily*>* methods, Symbol* match) {
-  streamIndentor si(tty, 4);
-  if (methods->length() == 0) {
-    tty->indent();
-    tty->print_cr("No Logical Method found");
-  }
-  for (int i = 0; i < methods->length(); ++i) {
-    tty->indent();
-    GenericMethodFamily* lm = methods->at(i);
-    if (lm->contains_signature(match)) {
-      tty->print_cr("<Matching>");
-    } else {
-      tty->print_cr("<Non-Matching>");
-    }
-    lm->print_sig_on(tty, lm->get_generic_sig(), 1);
-  }
-}
-#endif // ndef PRODUCT
 
 static void create_overpasses(
     GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
 
-static void generate_generic_defaults(
-      InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
-      EmptyVtableSlot* slot, int current_slot_index, TRAPS) {
-
-  if (slot->is_bound()) {
-#ifndef PRODUCT
-    if (TraceDefaultMethods) {
-      streamIndentor si(tty, 4);
-      tty->indent().print_cr("Already bound to logical method:");
-      GenericMethodFamily* lm = (GenericMethodFamily*)(slot->get_binding());
-      lm->print_sig_on(tty, lm->get_generic_sig(), 1);
-    }
-#endif // ndef PRODUCT
-    return; // covered by previous processing
-  }
-
-  generic::DescriptorCache cache;
-
-  generic::Context ctx(&cache);
-  FindMethodsByGenericSig visitor(&cache, slot->name(), &ctx, CHECK);
-  visitor.run(klass);
-
-  GrowableArray<GenericMethodFamily*> discovered_families;
-  visitor.get_discovered_families(&discovered_families);
-
-#ifndef PRODUCT
-  if (TraceDefaultMethods) {
-    print_generic_families(&discovered_families, slot->signature());
-  }
-#endif // ndef PRODUCT
-
-  // Find and populate any other slots that match the discovered families
-  for (int j = current_slot_index; j < empty_slots->length(); ++j) {
-    EmptyVtableSlot* open_slot = empty_slots->at(j);
-
-    if (slot->name() == open_slot->name()) {
-      for (int k = 0; k < discovered_families.length(); ++k) {
-        GenericMethodFamily* lm = discovered_families.at(k);
-
-        if (lm->contains_signature(open_slot->signature())) {
-          lm->determine_target(klass, CHECK);
-          open_slot->bind_family(lm);
-        }
-      }
-    }
-  }
-}
-
 static void generate_erased_defaults(
      InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
      EmptyVtableSlot* slot, TRAPS) {
@@ -943,21 +714,14 @@
 //
 // First it finds any name/signature slots that need an implementation (either
 // because they are miranda or a superclass's implementation is an overpass
-// itself).  For each slot, iterate over the hierarchy, using generic signature
-// information to partition any methods that match the name into method families
-// where each family contains methods whose signatures are equivalent at the
-// language level (i.e., their reified parameters match and return values are
-// covariant). Check those sets to see if they contain a signature that matches
-// the slot we're looking at (if we're lucky, there might be other empty slots
-// that we can fill using the same analysis).
+// itself).  For each slot, iterate over the hierarchy to see if it contains a
+// signature that matches the slot we are looking at.
 //
 // For each slot filled, we generate an overpass method that either calls the
 // unique default method candidate using invokespecial, or throws an exception
 // (in the case of no default method candidates, or more than one valid
-// candidate).  These methods are then added to the class's method list.  If
-// the method set we're using contains methods (qualified or not) with a
-// different runtime signature than the method we're creating, then we have to
-// create bridges with those signatures too.
+// candidate).  These methods are then added to the class's method list.
+// The JVM does not create bridges or handle generic signatures here.
 void DefaultMethods::generate_default_methods(
     InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
 
@@ -997,11 +761,7 @@
     }
 #endif // ndef PRODUCT
 
-    if (ParseGenericDefaults) {
-      generate_generic_defaults(klass, empty_slots, slot, i, CHECK);
-    } else {
-      generate_erased_defaults(klass, empty_slots, slot, CHECK);
-    }
+    generate_erased_defaults(klass, empty_slots, slot, CHECK);
  }
 #ifndef PRODUCT
   if (TraceDefaultMethods) {
@@ -1019,13 +779,13 @@
 }
 
 /**
- * Generic analysis was used upon interface '_target' and found a unique
- * default method candidate with generic signature '_method_desc'.  This
+ * Interface inheritance rules were used to find a unique default method
+ * candidate for the resolved class. This
  * method is only viable if it would also be in the set of default method
  * candidates if we ran a full analysis on the current class.
  *
  * The only reason that the method would not be in the set of candidates for
- * the current class is if that there's another covariantly matching method
+ * the current class is that there's another matching method
  * which is "more specific" than the found method -- i.e., one could find a
  * path in the interface hierarchy in which the matching method appears
  * before we get to '_target'.
@@ -1110,49 +870,6 @@
     : ShadowChecker(thread, name, holder, target) {}
 };
 
-class GenericShadowChecker : public ShadowChecker {
- private:
-  generic::DescriptorCache* _cache;
-  generic::MethodDescriptor* _method_desc;
-
-  bool path_has_shadow() {
-    generic::Context ctx(_cache);
-
-    for (int i = current_depth() - 1; i > 0; --i) {
-      InstanceKlass* ik = class_at_depth(i);
-      InstanceKlass* sub = class_at_depth(i + 1);
-      ctx.apply_type_arguments(sub, ik, THREAD);
-
-      if (ik->is_interface()) {
-        int end;
-        int start = ik->find_method_by_name(_method_name, &end);
-        if (start != -1) {
-          for (int j = start; j < end; ++j) {
-            Method* mo = ik->methods()->at(j);
-            generic::MethodDescriptor* md = _cache->descriptor_for(mo, THREAD);
-            if (_method_desc->covariant_match(md, &ctx)) {
-              return true;
-            }
-          }
-        }
-      }
-    }
-    return false;
-  }
-
- public:
-
-  GenericShadowChecker(generic::DescriptorCache* cache, Thread* thread,
-      Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc,
-      InstanceKlass* target)
-    : ShadowChecker(thread, name, holder, target) {
-      _cache = cache;
-      _method_desc = desc;
- }
-};
-
-
-
 // Find the unique qualified candidate from the perspective of the super_class
 // which is the resolved_klass, which must be an immediate superinterface
 // of klass
@@ -1166,103 +883,48 @@
 
   if (family != NULL) {
     family->determine_target(current_class, CHECK_NULL);  // get target from current_class
-  }
 
-  if (family->has_target()) {
-    Method* target = family->get_selected_target();
-    InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
+    if (family->has_target()) {
+      Method* target = family->get_selected_target();
+      InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
 
-    // Verify that the identified method is valid from the context of
-    // the current class, which is the caller class for invokespecial
-    // link resolution, i.e. ensure there it is not shadowed.
-    // You can use invokespecial to disambiguate interface methods, but
-    // you can not use it to skip over an interface method that would shadow it.
-    ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
-    checker.run(current_class);
+      // Verify that the identified method is valid from the context of
+      // the current class, which is the caller class for invokespecial
+      // link resolution, i.e. ensure that it is not shadowed.
+      // You can use invokespecial to disambiguate interface methods, but
+      // you cannot use it to skip over an interface method that would shadow it.
+      ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
+      checker.run(current_class);
 
-    if (checker.found_shadow()) {
+      if (checker.found_shadow()) {
 #ifndef PRODUCT
-      if (TraceDefaultMethods) {
-        tty->print_cr("    Only candidate found was shadowed.");
-      }
+        if (TraceDefaultMethods) {
+          tty->print_cr("    Only candidate found was shadowed.");
+        }
 #endif // ndef PRODUCT
-      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-                 "Accessible default method not found", NULL);
-    } else {
+        THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+                   "Accessible default method not found", NULL);
+      } else {
 #ifndef PRODUCT
-      if (TraceDefaultMethods) {
-        family->print_sig_on(tty, target->signature(), 1);
-      }
+        if (TraceDefaultMethods) {
+          family->print_sig_on(tty, target->signature(), 1);
+        }
 #endif // ndef PRODUCT
-      return target;
-    }
+        return target;
+      }
+    } else {
+      assert(family->throws_exception(), "must have target or throw");
+      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+                 family->get_exception_message()->as_C_string(), NULL);
+    }
   } else {
-    assert(family->throws_exception(), "must have target or throw");
-    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-               family->get_exception_message()->as_C_string(), NULL);
+    // no method found
+    ResourceMark rm(THREAD);
+    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(),
+              Method::name_and_sig_as_C_string(current_class,
+                                               method_name, sig), NULL);
   }
 }
-
-// super_class is assumed to be the direct super of current_class
-Method* find_generic_super_default( InstanceKlass* current_class,
-                                    InstanceKlass* super_class,
-                                    Symbol* method_name, Symbol* sig, TRAPS) {
-  generic::DescriptorCache cache;
-  generic::Context ctx(&cache);
-
-  // Prime the initial generic context for current -> super_class
-  ctx.apply_type_arguments(current_class, super_class, CHECK_NULL);
-
-  FindMethodsByGenericSig visitor(&cache, method_name, &ctx, CHECK_NULL);
-  visitor.run(super_class);
-
-  GrowableArray<GenericMethodFamily*> families;
-  visitor.get_discovered_families(&families);
-
-#ifndef PRODUCT
-  if (TraceDefaultMethods) {
-    print_generic_families(&families, sig);
-  }
-#endif // ndef PRODUCT
-
-  GenericMethodFamily* selected_family = NULL;
-
-  for (int i = 0; i < families.length(); ++i) {
-    GenericMethodFamily* lm = families.at(i);
-    if (lm->contains_signature(sig)) {
-      lm->determine_target(current_class, CHECK_NULL);
-      selected_family = lm;
-    }
-  }
-
-  if (selected_family->has_target()) {
-    Method* target = selected_family->get_selected_target();
-    InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
-
-    // Verify that the identified method is valid from the context of
-    // the current class
-    GenericShadowChecker checker(&cache, THREAD, target->name(),
-        holder, selected_family->descriptor(), super_class);
-    checker.run(current_class);
-
-    if (checker.found_shadow()) {
-#ifndef PRODUCT
-      if (TraceDefaultMethods) {
-        tty->print_cr("    Only candidate found was shadowed.");
-      }
-#endif // ndef PRODUCT
-      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-                 "Accessible default method not found", NULL);
-    } else {
-      return target;
-    }
-  } else {
-    assert(selected_family->throws_exception(), "must have target or throw");
-    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-               selected_family->get_exception_message()->as_C_string(), NULL);
-  }
-}
-
 // This is called during linktime when we find an invokespecial call that
 // refers to a direct superinterface.  It indicates that we should find the
 // default method in the hierarchy of that superinterface, and if that method
@@ -1296,13 +958,8 @@
   assert(super_class->is_interface(), "only call for default methods");
 
   Method* target = NULL;
-  if (ParseGenericDefaults) {
-    target = find_generic_super_default(current_class, super_class,
-                                        method_name, sig, CHECK_NULL);
-  } else {
-    target = find_erased_super_default(current_class, super_class,
-                                       method_name, sig, CHECK_NULL);
-  }
+  target = find_erased_super_default(current_class, super_class,
+                                     method_name, sig, CHECK_NULL);
 
 #ifndef PRODUCT
   if (target != NULL) {
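
After this change, resolving an invokespecial reference to a superinterface default comes down to the erased analysis: exactly one unshadowed candidate yields a target, no candidate raises NoSuchMethodError, and a conflicting or shadowed candidate raises AbstractMethodError. A schematic standalone model of that decision (names and types are illustrative only, not HotSpot API):

// Schematic model only; mirrors the error cases of find_erased_super_default.
#include <cstdio>
#include <string>
#include <vector>

struct Candidate { std::string holder; bool shadowed; };

// Returns the holder of the unique, unshadowed candidate, or an error string.
std::string find_erased_super_default(const std::vector<Candidate>& family) {
  if (family.empty())
    return "NoSuchMethodError: no matching method";
  if (family.size() > 1)
    return "AbstractMethodError: conflicting default methods";
  if (family[0].shadowed)
    return "AbstractMethodError: accessible default method not found";
  return "invokespecial target in " + family[0].holder;
}

int main() {
  std::puts(find_erased_super_default({{"I", false}}).c_str());  // unique target
  std::puts(find_erased_super_default({}).c_str());              // no candidate
  std::puts(find_erased_super_default({{"I", true}}).c_str());   // shadowed
}
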
--- a/hotspot/src/share/vm/classfile/genericSignatures.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1279 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "classfile/genericSignatures.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "memory/resourceArea.hpp"
-
-namespace generic {
-
-// Helper class for parsing the generic signature Symbol in klass and methods
-class DescriptorStream : public ResourceObj {
- private:
-  Symbol* _symbol;
-  int _offset;
-  int _mark;
-  const char* _parse_error;
-
-  void set_parse_error(const char* error) {
-    assert(error != NULL, "Can't set NULL error string");
-    _parse_error = error;
-  }
-
- public:
-  DescriptorStream(Symbol* sym)
-      : _symbol(sym), _offset(0), _mark(-1), _parse_error(NULL) {}
-
-  const char* parse_error() const {
-    return _parse_error;
-  }
-
-  bool at_end() { return _offset >= _symbol->utf8_length(); }
-
-  char peek() {
-    if (at_end()) {
-      set_parse_error("Peeking past end of signature");
-      return '\0';
-    } else {
-      return _symbol->byte_at(_offset);
-    }
-  }
-
-  char read() {
-    if (at_end()) {
-      set_parse_error("Reading past end of signature");
-      return '\0';
-    } else {
-      return _symbol->byte_at(_offset++);
-    }
-  }
-
-  void read(char expected) {
-    char c = read();
-    assert_char(c, expected, 0);
-  }
-
-  void assert_char(char c, char expected, int pos = -1) {
-    if (c != expected) {
-      const char* fmt = "Parse error at %d: expected %c but got %c";
-      size_t len = strlen(fmt) + 5;
-      char* buffer = NEW_RESOURCE_ARRAY(char, len);
-      jio_snprintf(buffer, len, fmt, _offset + pos, expected, c);
-      set_parse_error(buffer);
-    }
-  }
-
-  void push(char c) {
-    assert(c == _symbol->byte_at(_offset - 1), "Pushing back wrong value");
-    --_offset;
-  }
-
-  void expect_end() {
-    if (!at_end()) {
-      set_parse_error("Unexpected data trailing signature");
-    }
-  }
-
-  bool has_mark() { return _mark != -1; }
-
-  void set_mark() {
-    _mark = _offset;
-  }
-
-  Identifier* identifier_from_mark() {
-    assert(has_mark(), "Mark should be set");
-    if (!has_mark()) {
-      set_parse_error("Expected mark to be set");
-      return NULL;
-    } else {
-      Identifier* id = new Identifier(_symbol, _mark, _offset - 1);
-      _mark = -1;
-      return id;
-    }
-  }
-};
-
-
-#define CHECK_FOR_PARSE_ERROR()         \
-  if (STREAM->parse_error() != NULL) {   \
-    if (VerifyGenericSignatures) {      \
-      fatal(STREAM->parse_error());      \
-    }                                   \
-    return NULL;                        \
-  } (void)0
-
-#define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
-#define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
-#define PUSH(c) STREAM->push(c)
-#define EXPECT(c) STREAM->read(c); CHECK_FOR_PARSE_ERROR()
-#define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
-#define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()
-
-#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0
-
-#ifndef PRODUCT
-void Identifier::print_on(outputStream* str) const {
-  for (int i = _begin; i < _end; ++i) {
-    str->print("%c", (char)_sym->byte_at(i));
-  }
-}
-#endif // ndef PRODUCT
-
-bool Identifier::equals(Identifier* other) {
-  if (_sym == other->_sym && _begin == other->_begin && _end == other->_end) {
-    return true;
-  } else if (_end - _begin != other->_end - other->_begin) {
-    return false;
-  } else {
-    size_t len = _end - _begin;
-    char* addr = ((char*)_sym->bytes()) + _begin;
-    char* oaddr = ((char*)other->_sym->bytes()) + other->_begin;
-    return strncmp(addr, oaddr, len) == 0;
-  }
-}
-
-bool Identifier::equals(Symbol* sym) {
-  Identifier id(sym, 0, sym->utf8_length());
-  return equals(&id);
-}
-
-/**
- * A formal type parameter may be found in the enclosing class, but it could
- * also come from an enclosing method or outer class, in the case of inner-outer
- * classes or anonymous classes.  For example:
- *
- * class Outer<T,V> {
- *   class Inner<W> {
- *     void m(T t, V v, W w);
- *   }
- * }
- *
- * In this case, the type variables in m()'s signature are not all found in the
- * immediate enclosing class (Inner).  class Inner has only type parameter W,
- * but its outer_class field will reference Outer's descriptor, which contains
- * T & V (no outer_method in this case).
- *
- * If you have an anonymous class, it has both an enclosing method *and* an
- * enclosing class where type parameters can be declared:
- *
- * class MOuter<T> {
- *   <V> void bar(V v) {
- *     Runnable r = new Runnable() {
- *       public void run() {}
- *       public void foo(T t, V v) { ... }
- *     };
- *   }
- * }
- *
- * In this case, foo will be a member of some class, Runnable$1, which has no
- * formal parameters itself, but has an outer_method (bar()) which provides
- * type parameter V, and an outer class MOuter with type parameter T.
- *
- * It is also possible that the outer class is itself an inner class to some
- * other class (or an anonymous class with an enclosing method), so we need to
- * follow the outer_class/outer_method chain to its end when looking for a
- * type parameter.
- */
-TypeParameter* Descriptor::find_type_parameter(Identifier* id, int* depth) {
-
-  int current_depth = 0;
-
-  MethodDescriptor* outer_method = as_method_signature();
-  ClassDescriptor* outer_class = as_class_signature();
-
-  if (outer_class == NULL) { // 'this' is a method signature; use the holder
-    outer_class = outer_method->outer_class();
-  }
-
-  while (outer_method != NULL || outer_class != NULL) {
-    if (outer_method != NULL) {
-      for (int i = 0; i < outer_method->type_parameters().length(); ++i) {
-        TypeParameter* p = outer_method->type_parameters().at(i);
-        if (p->identifier()->equals(id)) {
-          *depth = -1; // indicates that this is a method parameter
-          return p;
-        }
-      }
-    }
-    if (outer_class != NULL) {
-      for (int i = 0; i < outer_class->type_parameters().length(); ++i) {
-        TypeParameter* p = outer_class->type_parameters().at(i);
-        if (p->identifier()->equals(id)) {
-          *depth = current_depth;
-          return p;
-        }
-      }
-      outer_method = outer_class->outer_method();
-      outer_class = outer_class->outer_class();
-      ++current_depth;
-    }
-  }
-
-  if (VerifyGenericSignatures) {
-    fatal("Could not resolve identifier");
-  }
-
-  return NULL;
-}
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(Klass* klass, TRAPS) {
-  return parse_generic_signature(klass, NULL, CHECK_NULL);
-}
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(
-      Klass* klass, Symbol* original_name, TRAPS) {
-
-  InstanceKlass* ik = InstanceKlass::cast(klass);
-  Symbol* sym = ik->generic_signature();
-
-  ClassDescriptor* spec;
-
-  if (sym == NULL || (spec = ClassDescriptor::parse_generic_signature(sym)) == NULL) {
-    spec = ClassDescriptor::placeholder(ik);
-  }
-
-  u2 outer_index = get_outer_class_index(ik, CHECK_NULL);
-  if (outer_index != 0) {
-    if (original_name == NULL) {
-      original_name = ik->name();
-    }
-    Handle class_loader = Handle(THREAD, ik->class_loader());
-    Handle protection_domain = Handle(THREAD, ik->protection_domain());
-
-    Symbol* outer_name = ik->constants()->klass_name_at(outer_index);
-    Klass* outer = SystemDictionary::find(
-        outer_name, class_loader, protection_domain, CHECK_NULL);
-    if (outer == NULL && !THREAD->is_Compiler_thread()) {
-      if (outer_name == ik->super()->name()) {
-        outer = SystemDictionary::resolve_super_or_fail(original_name, outer_name,
-                                                        class_loader, protection_domain,
-                                                        false, CHECK_NULL);
-      }
-      else {
-        outer = SystemDictionary::resolve_or_fail(outer_name, class_loader,
-                                                  protection_domain, false, CHECK_NULL);
-      }
-    }
-
-    InstanceKlass* outer_ik;
-    ClassDescriptor* outer_spec = NULL;
-    if (outer == NULL) {
-      outer_spec = ClassDescriptor::placeholder(ik);
-      assert(false, "Outer class not loaded and not loadable from here");
-    } else {
-      outer_ik = InstanceKlass::cast(outer);
-      outer_spec = parse_generic_signature(outer, original_name, CHECK_NULL);
-    }
-    spec->set_outer_class(outer_spec);
-
-    u2 encl_method_idx = ik->enclosing_method_method_index();
-    if (encl_method_idx != 0 && outer_ik != NULL) {
-      ConstantPool* cp = ik->constants();
-      u2 name_index = cp->name_ref_index_at(encl_method_idx);
-      u2 sig_index = cp->signature_ref_index_at(encl_method_idx);
-      Symbol* name = cp->symbol_at(name_index);
-      Symbol* sig = cp->symbol_at(sig_index);
-      Method* m = outer_ik->find_method(name, sig);
-      if (m != NULL) {
-        Symbol* gsig = m->generic_signature();
-        if (gsig != NULL) {
-          MethodDescriptor* gms = MethodDescriptor::parse_generic_signature(gsig, outer_spec);
-          spec->set_outer_method(gms);
-        }
-      } else if (VerifyGenericSignatures) {
-        ResourceMark rm;
-        stringStream ss;
-        ss.print("Could not find method %s %s in class %s",
-          name->as_C_string(), sig->as_C_string(), outer_name->as_C_string());
-        fatal(ss.as_string());
-      }
-    }
-  }
-
-  spec->bind_variables_to_parameters();
-  return spec;
-}
-
-ClassDescriptor* ClassDescriptor::placeholder(InstanceKlass* klass) {
-  GrowableArray<TypeParameter*> formals;
-  GrowableArray<ClassType*> interfaces;
-  ClassType* super_type = NULL;
-
-  Klass* super_klass = klass->super();
-  if (super_klass != NULL) {
-    InstanceKlass* super = InstanceKlass::cast(super_klass);
-    super_type = ClassType::from_symbol(super->name());
-  }
-
-  for (int i = 0; i < klass->local_interfaces()->length(); ++i) {
-    InstanceKlass* iface = InstanceKlass::cast(klass->local_interfaces()->at(i));
-    interfaces.append(ClassType::from_symbol(iface->name()));
-  }
-  return new ClassDescriptor(formals, super_type, interfaces);
-}
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(Symbol* sym) {
-
-  DescriptorStream ds(sym);
-  DescriptorStream* STREAM = &ds;
-
-  GrowableArray<TypeParameter*> parameters(8);
-  char c = READ();
-  if (c == '<') {
-    c = READ();
-    while (c != '>') {
-      PUSH(c);
-      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
-      parameters.append(ftp);
-      c = READ();
-    }
-  } else {
-    PUSH(c);
-  }
-
-  EXPECT('L');
-  ClassType* super = ClassType::parse_generic_signature(CHECK_STREAM);
-
-  GrowableArray<ClassType*> signatures(2);
-  while (!STREAM->at_end()) {
-    EXPECT('L');
-    ClassType* iface = ClassType::parse_generic_signature(CHECK_STREAM);
-    signatures.append(iface);
-  }
-
-  EXPECT_END();
-
-  return new ClassDescriptor(parameters, super, signatures);
-}
-
-#ifndef PRODUCT
-void ClassDescriptor::print_on(outputStream* str) const {
-  str->indent().print_cr("ClassDescriptor {");
-  {
-    streamIndentor si(str);
-    if (_type_parameters.length() > 0) {
-      str->indent().print_cr("Formals {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _type_parameters.length(); ++i) {
-          _type_parameters.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    if (_super != NULL) {
-      str->indent().print_cr("Superclass: ");
-      {
-        streamIndentor si(str);
-        _super->print_on(str);
-      }
-    }
-    if (_interfaces.length() > 0) {
-      str->indent().print_cr("SuperInterfaces: {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _interfaces.length(); ++i) {
-          _interfaces.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    if (_outer_method != NULL) {
-      str->indent().print_cr("Outer Method: {");
-      {
-        streamIndentor si(str);
-        _outer_method->print_on(str);
-      }
-      str->indent().print_cr("}");
-    }
-    if (_outer_class != NULL) {
-      str->indent().print_cr("Outer Class: {");
-      {
-        streamIndentor si(str);
-        _outer_class->print_on(str);
-      }
-      str->indent().print_cr("}");
-    }
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ClassType* ClassDescriptor::interface_desc(Symbol* sym) {
-  for (int i = 0; i < _interfaces.length(); ++i) {
-    if (_interfaces.at(i)->identifier()->equals(sym)) {
-      return _interfaces.at(i);
-    }
-  }
-  if (VerifyGenericSignatures) {
-    fatal("Did not find expected interface");
-  }
-  return NULL;
-}
-
-void ClassDescriptor::bind_variables_to_parameters() {
-  if (_outer_class != NULL) {
-    _outer_class->bind_variables_to_parameters();
-  }
-  if (_outer_method != NULL) {
-    _outer_method->bind_variables_to_parameters();
-  }
-  for (int i = 0; i < _type_parameters.length(); ++i) {
-    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
-  }
-  if (_super != NULL) {
-    _super->bind_variables_to_parameters(this);
-  }
-  for (int i = 0; i < _interfaces.length(); ++i) {
-    _interfaces.at(i)->bind_variables_to_parameters(this);
-  }
-}
-
-ClassDescriptor* ClassDescriptor::canonicalize(Context* ctx) {
-
-  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
-  for (int i = 0; i < _type_parameters.length(); ++i) {
-    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
-  }
-
-  ClassDescriptor* outer = _outer_class == NULL ? NULL :
-      _outer_class->canonicalize(ctx);
-
-  ClassType* super = _super == NULL ? NULL : _super->canonicalize(ctx, 0);
-
-  GrowableArray<ClassType*> interfaces(_interfaces.length());
-  for (int i = 0; i < _interfaces.length(); ++i) {
-    interfaces.append(_interfaces.at(i)->canonicalize(ctx, 0));
-  }
-
-  MethodDescriptor* md = _outer_method == NULL ? NULL :
-      _outer_method->canonicalize(ctx);
-
-  return new ClassDescriptor(type_params, super, interfaces, outer, md);
-}
-
-u2 ClassDescriptor::get_outer_class_index(InstanceKlass* klass, TRAPS) {
-  int inner_index = InstanceKlass::inner_class_inner_class_info_offset;
-  int outer_index = InstanceKlass::inner_class_outer_class_info_offset;
-  int name_offset = InstanceKlass::inner_class_inner_name_offset;
-  int next_offset = InstanceKlass::inner_class_next_offset;
-
-  if (klass->inner_classes() == NULL || klass->inner_classes()->length() == 0) {
-    // No inner class info => no declaring class
-    return 0;
-  }
-
-  Array<u2>* i_icls = klass->inner_classes();
-  ConstantPool* i_cp = klass->constants();
-  int i_length = i_icls->length();
-
-  // Find inner_klass attribute
-  for (int i = 0; i + next_offset < i_length; i += next_offset) {
-    u2 ioff = i_icls->at(i + inner_index);
-    u2 ooff = i_icls->at(i + outer_index);
-    u2 noff = i_icls->at(i + name_offset);
-    if (ioff != 0) {
-      // Check to see if the name matches the class we're looking for
-      // before attempting to find the class.
-      if (i_cp->klass_name_at_matches(klass, ioff) && ooff != 0) {
-        return ooff;
-      }
-    }
-  }
-
-  // It may be anonymous; try for that.
-  u2 encl_method_class_idx = klass->enclosing_method_class_index();
-  if (encl_method_class_idx != 0) {
-    return encl_method_class_idx;
-  }
-
-  return 0;
-}
-
-MethodDescriptor* MethodDescriptor::parse_generic_signature(Method* m, ClassDescriptor* outer) {
-  Symbol* generic_sig = m->generic_signature();
-  MethodDescriptor* md = NULL;
-  if (generic_sig == NULL || (md = parse_generic_signature(generic_sig, outer)) == NULL) {
-    md = parse_generic_signature(m->signature(), outer);
-  }
-  assert(md != NULL, "Could not parse method signature");
-  md->bind_variables_to_parameters();
-  return md;
-}
-
-MethodDescriptor* MethodDescriptor::parse_generic_signature(Symbol* sym, ClassDescriptor* outer) {
-
-  DescriptorStream ds(sym);
-  DescriptorStream* STREAM = &ds;
-
-  GrowableArray<TypeParameter*> params(8);
-  char c = READ();
-  if (c == '<') {
-    c = READ();
-    while (c != '>') {
-      PUSH(c);
-      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
-      params.append(ftp);
-      c = READ();
-    }
-  } else {
-    PUSH(c);
-  }
-
-  EXPECT('(');
-
-  GrowableArray<Type*> parameters(8);
-  c = READ();
-  while (c != ')') {
-    PUSH(c);
-    Type* arg = Type::parse_generic_signature(CHECK_STREAM);
-    parameters.append(arg);
-    c = READ();
-  }
-
-  Type* rt = Type::parse_generic_signature(CHECK_STREAM);
-
-  GrowableArray<Type*> throws;
-  while (!STREAM->at_end()) {
-    EXPECT('^');
-    Type* spec = Type::parse_generic_signature(CHECK_STREAM);
-    throws.append(spec);
-  }
-
-  return new MethodDescriptor(params, outer, parameters, rt, throws);
-}
-
-void MethodDescriptor::bind_variables_to_parameters() {
-  for (int i = 0; i < _type_parameters.length(); ++i) {
-    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
-  }
-  for (int i = 0; i < _parameters.length(); ++i) {
-    _parameters.at(i)->bind_variables_to_parameters(this);
-  }
-  _return_type->bind_variables_to_parameters(this);
-  for (int i = 0; i < _throws.length(); ++i) {
-    _throws.at(i)->bind_variables_to_parameters(this);
-  }
-}
-
-bool MethodDescriptor::covariant_match(MethodDescriptor* other, Context* ctx) {
-
-  if (_parameters.length() == other->_parameters.length()) {
-    for (int i = 0; i < _parameters.length(); ++i) {
-      if (!_parameters.at(i)->covariant_match(other->_parameters.at(i), ctx)) {
-        return false;
-      }
-    }
-
-    if (_return_type->as_primitive() != NULL) {
-      return _return_type->covariant_match(other->_return_type, ctx);
-    } else {
-      // return type is a reference
-      return other->_return_type->as_class() != NULL ||
-             other->_return_type->as_variable() != NULL ||
-             other->_return_type->as_array() != NULL;
-    }
-  } else {
-    return false;
-  }
-}
-
-MethodDescriptor* MethodDescriptor::canonicalize(Context* ctx) {
-
-  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
-  for (int i = 0; i < _type_parameters.length(); ++i) {
-    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
-  }
-
-  ClassDescriptor* outer = _outer_class == NULL ? NULL :
-      _outer_class->canonicalize(ctx);
-
-  GrowableArray<Type*> params(_parameters.length());
-  for (int i = 0; i < _parameters.length(); ++i) {
-    params.append(_parameters.at(i)->canonicalize(ctx, 0));
-  }
-
-  Type* rt = _return_type->canonicalize(ctx, 0);
-
-  GrowableArray<Type*> throws(_throws.length());
-  for (int i = 0; i < _throws.length(); ++i) {
-    throws.append(_throws.at(i)->canonicalize(ctx, 0));
-  }
-
-  return new MethodDescriptor(type_params, outer, params, rt, throws);
-}
-
-#ifndef PRODUCT
-TempNewSymbol MethodDescriptor::reify_signature(Context* ctx, TRAPS) {
-  stringStream ss(256);
-
-  ss.print("(");
-  for (int i = 0; i < _parameters.length(); ++i) {
-    _parameters.at(i)->reify_signature(&ss, ctx);
-  }
-  ss.print(")");
-  _return_type->reify_signature(&ss, ctx);
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
-}
-
-void MethodDescriptor::print_on(outputStream* str) const {
-  str->indent().print_cr("MethodDescriptor {");
-  {
-    streamIndentor si(str);
-    if (_type_parameters.length() > 0) {
-      str->indent().print_cr("Formals: {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _type_parameters.length(); ++i) {
-          _type_parameters.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    str->indent().print_cr("Parameters: {");
-    {
-      streamIndentor si(str);
-      for (int i = 0; i < _parameters.length(); ++i) {
-        _parameters.at(i)->print_on(str);
-      }
-    }
-    str->indent().print_cr("}");
-    str->indent().print_cr("Return Type: ");
-    {
-      streamIndentor si(str);
-      _return_type->print_on(str);
-    }
-
-    if (_throws.length() > 0) {
-      str->indent().print_cr("Throws: {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _throws.length(); ++i) {
-          _throws.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-TypeParameter* TypeParameter::parse_generic_signature(DescriptorStream* STREAM) {
-  STREAM->set_mark();
-  char c = READ();
-  while (c != ':') {
-    c = READ();
-  }
-
-  Identifier* id = STREAM->identifier_from_mark();
-
-  ClassType* class_bound = NULL;
-  GrowableArray<ClassType*> interface_bounds(8);
-
-  c = READ();
-  if (c != '>') {
-    if (c != ':') {
-      EXPECTED(c, 'L');
-      class_bound = ClassType::parse_generic_signature(CHECK_STREAM);
-      c = READ();
-    }
-
-    while (c == ':') {
-      EXPECT('L');
-      ClassType* fts = ClassType::parse_generic_signature(CHECK_STREAM);
-      interface_bounds.append(fts);
-      c = READ();
-    }
-  }
-  PUSH(c);
-
-  return new TypeParameter(id, class_bound, interface_bounds);
-}
-
-void TypeParameter::bind_variables_to_parameters(Descriptor* sig, int position) {
-  if (_class_bound != NULL) {
-    _class_bound->bind_variables_to_parameters(sig);
-  }
-  for (int i = 0; i < _interface_bounds.length(); ++i) {
-    _interface_bounds.at(i)->bind_variables_to_parameters(sig);
-  }
-  _position = position;
-}
-
-Type* TypeParameter::resolve(
-    Context* ctx, int inner_depth, int ctx_depth) {
-
-  if (inner_depth == -1) {
-    // This indicates that the parameter is a method type parameter, which
-    // isn't resolvable using the class hierarchy context
-    return bound();
-  }
-
-  ClassType* provider = ctx->at_depth(ctx_depth);
-  if (provider != NULL) {
-    for (int i = 0; i < inner_depth && provider != NULL; ++i) {
-      provider = provider->outer_class();
-    }
-    if (provider != NULL) {
-      TypeArgument* arg = provider->type_argument_at(_position);
-      if (arg != NULL) {
-        Type* value = arg->lower_bound();
-        return value->canonicalize(ctx, ctx_depth + 1);
-      }
-    }
-  }
-
-  return bound();
-}
-
-TypeParameter* TypeParameter::canonicalize(Context* ctx, int ctx_depth) {
-  ClassType* bound = _class_bound == NULL ? NULL :
-     _class_bound->canonicalize(ctx, ctx_depth);
-
-  GrowableArray<ClassType*> ifaces(_interface_bounds.length());
-  for (int i = 0; i < _interface_bounds.length(); ++i) {
-    ifaces.append(_interface_bounds.at(i)->canonicalize(ctx, ctx_depth));
-  }
-
-  TypeParameter* ret = new TypeParameter(_identifier, bound, ifaces);
-  ret->_position = _position;
-  return ret;
-}
-
-ClassType* TypeParameter::bound() {
-  if (_class_bound != NULL) {
-    return _class_bound;
-  }
-
-  if (_interface_bounds.length() == 1) {
-    return _interface_bounds.at(0);
-  }
-
-  return ClassType::java_lang_Object(); // TODO: investigate this case
-}
-
-#ifndef PRODUCT
-void TypeParameter::print_on(outputStream* str) const {
-  str->indent().print_cr("Formal: {");
-  {
-    streamIndentor si(str);
-
-    str->indent().print("Identifier: ");
-    _identifier->print_on(str);
-    str->print_cr("");
-    if (_class_bound != NULL) {
-      str->indent().print_cr("Class Bound: ");
-      streamIndentor si(str);
-      _class_bound->print_on(str);
-    }
-    if (_interface_bounds.length() > 0) {
-      str->indent().print_cr("Interface Bounds: {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _interface_bounds.length(); ++i) {
-          _interface_bounds.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    str->indent().print_cr("Ordinal Position: %d", _position);
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-Type* Type::parse_generic_signature(DescriptorStream* STREAM) {
-  char c = READ();
-  switch (c) {
-    case 'L':
-      return ClassType::parse_generic_signature(CHECK_STREAM);
-    case 'T':
-      return TypeVariable::parse_generic_signature(CHECK_STREAM);
-    case '[':
-      return ArrayType::parse_generic_signature(CHECK_STREAM);
-    default:
-      return new PrimitiveType(c);
-  }
-}
-
-Identifier* ClassType::parse_generic_signature_simple(GrowableArray<TypeArgument*>* args,
-    bool* has_inner, DescriptorStream* STREAM) {
-  STREAM->set_mark();
-
-  char c = READ();
-  while (c != ';' && c != '.' && c != '<') { c = READ(); }
-  Identifier* id = STREAM->identifier_from_mark();
-
-  if (c == '<') {
-    c = READ();
-    while (c != '>') {
-      PUSH(c);
-      TypeArgument* arg = TypeArgument::parse_generic_signature(CHECK_STREAM);
-      args->append(arg);
-      c = READ();
-    }
-    c = READ();
-  }
-
-  *has_inner = (c == '.');
-  if (!(*has_inner)) {
-    EXPECTED(c, ';');
-  }
-
-  return id;
-}
-
-ClassType* ClassType::parse_generic_signature(DescriptorStream* STREAM) {
-  return parse_generic_signature(NULL, CHECK_STREAM);
-}
-
-ClassType* ClassType::parse_generic_signature(ClassType* outer, DescriptorStream* STREAM) {
-  GrowableArray<TypeArgument*> args;
-  ClassType* gct = NULL;
-  bool has_inner = false;
-
-  Identifier* id = parse_generic_signature_simple(&args, &has_inner, STREAM);
-  if (id != NULL) {
-    gct = new ClassType(id, args, outer);
-
-    if (has_inner) {
-      gct = parse_generic_signature(gct, CHECK_STREAM);
-    }
-  }
-  return gct;
-}
-
-ClassType* ClassType::from_symbol(Symbol* sym) {
-  assert(sym != NULL, "Must not be null");
-  GrowableArray<TypeArgument*> args;
-  Identifier* id = new Identifier(sym, 0, sym->utf8_length());
-  return new ClassType(id, args, NULL);
-}
-
-ClassType* ClassType::java_lang_Object() {
-  return from_symbol(vmSymbols::java_lang_Object());
-}
-
-void ClassType::bind_variables_to_parameters(Descriptor* sig) {
-  for (int i = 0; i < _type_arguments.length(); ++i) {
-    _type_arguments.at(i)->bind_variables_to_parameters(sig);
-  }
-  if (_outer_class != NULL) {
-    _outer_class->bind_variables_to_parameters(sig);
-  }
-}
-
-TypeArgument* ClassType::type_argument_at(int i) {
-  if (i >= 0 && i < _type_arguments.length()) {
-    return _type_arguments.at(i);
-  } else {
-    return NULL;
-  }
-}
-
-#ifndef PRODUCT
-void ClassType::reify_signature(stringStream* ss, Context* ctx) {
-  ss->print("L");
-  _identifier->print_on(ss);
-  ss->print(";");
-}
-
-void ClassType::print_on(outputStream* str) const {
-  str->indent().print_cr("Class {");
-  {
-    streamIndentor si(str);
-    str->indent().print("Name: ");
-    _identifier->print_on(str);
-    str->print_cr("");
-    if (_type_arguments.length() != 0) {
-      str->indent().print_cr("Type Arguments: {");
-      {
-        streamIndentor si(str);
-        for (int j = 0; j < _type_arguments.length(); ++j) {
-          _type_arguments.at(j)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    if (_outer_class != NULL) {
-      str->indent().print_cr("Outer Class: ");
-      streamIndentor sir(str);
-      _outer_class->print_on(str);
-    }
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-bool ClassType::covariant_match(Type* other, Context* ctx) {
-
-  if (other == this) {
-    return true;
-  }
-
-  TypeVariable* variable = other->as_variable();
-  if (variable != NULL) {
-    other = variable->resolve(ctx, 0);
-  }
-
-  ClassType* outer = outer_class();
-  ClassType* other_class = other->as_class();
-
-  if (other_class == NULL ||
-      (outer == NULL) != (other_class->outer_class() == NULL)) {
-    return false;
-  }
-
-  if (!_identifier->equals(other_class->_identifier)) {
-    return false;
-  }
-
-  if (outer != NULL && !outer->covariant_match(other_class->outer_class(), ctx)) {
-    return false;
-  }
-
-  return true;
-}
-
-ClassType* ClassType::canonicalize(Context* ctx, int ctx_depth) {
-
-  GrowableArray<TypeArgument*> args(_type_arguments.length());
-  for (int i = 0; i < _type_arguments.length(); ++i) {
-    args.append(_type_arguments.at(i)->canonicalize(ctx, ctx_depth));
-  }
-
-  ClassType* outer = _outer_class == NULL ? NULL :
-      _outer_class->canonicalize(ctx, ctx_depth);
-
-  return new ClassType(_identifier, args, outer);
-}
-
-TypeVariable* TypeVariable::parse_generic_signature(DescriptorStream* STREAM) {
-  STREAM->set_mark();
-  char c = READ();
-  while (c != ';') {
-    c = READ();
-  }
-  Identifier* id = STREAM->identifier_from_mark();
-
-  return new TypeVariable(id);
-}
-
-void TypeVariable::bind_variables_to_parameters(Descriptor* sig) {
-  _parameter = sig->find_type_parameter(_id, &_inner_depth);
-  if (VerifyGenericSignatures && _parameter == NULL) {
-    fatal("Could not find formal parameter");
-  }
-}
-
-Type* TypeVariable::resolve(Context* ctx, int ctx_depth) {
-  if (parameter() != NULL) {
-    return parameter()->resolve(ctx, inner_depth(), ctx_depth);
-  } else {
-    if (VerifyGenericSignatures) {
-      fatal("Type variable matches no parameter");
-    }
-    return NULL;
-  }
-}
-
-bool TypeVariable::covariant_match(Type* other, Context* ctx) {
-
-  if (other == this) {
-    return true;
-  }
-
-  Context my_context(NULL); // empty, results in erasure
-  Type* my_type = resolve(&my_context, 0);
-  if (my_type == NULL) {
-    return false;
-  }
-
-  return my_type->covariant_match(other, ctx);
-}
-
-Type* TypeVariable::canonicalize(Context* ctx, int ctx_depth) {
-  return resolve(ctx, ctx_depth);
-}
-
-#ifndef PRODUCT
-void TypeVariable::reify_signature(stringStream* ss, Context* ctx) {
-  Type* type = resolve(ctx, 0);
-  if (type != NULL) {
-    type->reify_signature(ss, ctx);
-  }
-}
-
-void TypeVariable::print_on(outputStream* str) const {
-  str->indent().print_cr("Type Variable {");
-  {
-    streamIndentor si(str);
-    str->indent().print("Name: ");
-    _id->print_on(str);
-    str->print_cr("");
-    str->indent().print_cr("Inner depth: %d", _inner_depth);
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ArrayType* ArrayType::parse_generic_signature(DescriptorStream* STREAM) {
-  Type* base = Type::parse_generic_signature(CHECK_STREAM);
-  return new ArrayType(base);
-}
-
-void ArrayType::bind_variables_to_parameters(Descriptor* sig) {
-  assert(_base != NULL, "Invalid base");
-  _base->bind_variables_to_parameters(sig);
-}
-
-bool ArrayType::covariant_match(Type* other, Context* ctx) {
-  assert(_base != NULL, "Invalid base");
-
-  if (other == this) {
-    return true;
-  }
-
-  ArrayType* other_array = other->as_array();
-  return (other_array != NULL && _base->covariant_match(other_array->_base, ctx));
-}
-
-ArrayType* ArrayType::canonicalize(Context* ctx, int ctx_depth) {
-  assert(_base != NULL, "Invalid base");
-  return new ArrayType(_base->canonicalize(ctx, ctx_depth));
-}
-
-#ifndef PRODUCT
-void ArrayType::reify_signature(stringStream* ss, Context* ctx) {
-  assert(_base != NULL, "Invalid base");
-  ss->print("[");
-  _base->reify_signature(ss, ctx);
-}
-
-void ArrayType::print_on(outputStream* str) const {
-  str->indent().print_cr("Array {");
-  {
-    streamIndentor si(str);
-    _base->print_on(str);
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-bool PrimitiveType::covariant_match(Type* other, Context* ctx) {
-
-  PrimitiveType* other_prim = other->as_primitive();
-  return (other_prim != NULL && _type == other_prim->_type);
-}
-
-PrimitiveType* PrimitiveType::canonicalize(Context* ctx, int ctxd) {
-  return this;
-}
-
-#ifndef PRODUCT
-void PrimitiveType::reify_signature(stringStream* ss, Context* ctx) {
-  ss->print("%c", _type);
-}
-
-void PrimitiveType::print_on(outputStream* str) const {
-  str->indent().print_cr("Primitive: '%c'", _type);
-}
-#endif // ndef PRODUCT
-
-void PrimitiveType::bind_variables_to_parameters(Descriptor* sig) {
-}
-
-TypeArgument* TypeArgument::parse_generic_signature(DescriptorStream* STREAM) {
-  char c = READ();
-  Type* type = NULL;
-
-  switch (c) {
-    case '*':
-      return new TypeArgument(ClassType::java_lang_Object(), NULL);
-      break;
-    default:
-      PUSH(c);
-      // fall-through
-    case '+':
-    case '-':
-      type = Type::parse_generic_signature(CHECK_STREAM);
-      if (c == '+') {
-        return new TypeArgument(type, NULL);
-      } else if (c == '-') {
-        return new TypeArgument(ClassType::java_lang_Object(), type);
-      } else {
-        return new TypeArgument(type, type);
-      }
-  }
-}
-
-void TypeArgument::bind_variables_to_parameters(Descriptor* sig) {
-  assert(_lower_bound != NULL, "Invalid lower bound");
-  _lower_bound->bind_variables_to_parameters(sig);
-  if (_upper_bound != NULL && _upper_bound != _lower_bound) {
-    _upper_bound->bind_variables_to_parameters(sig);
-  }
-}
-
-bool TypeArgument::covariant_match(TypeArgument* other, Context* ctx) {
-  assert(_lower_bound != NULL, "Invalid lower bound");
-
-  if (other == this) {
-    return true;
-  }
-
-  if (!_lower_bound->covariant_match(other->lower_bound(), ctx)) {
-    return false;
-  }
-  return true;
-}
-
-TypeArgument* TypeArgument::canonicalize(Context* ctx, int ctx_depth) {
-  assert(_lower_bound != NULL, "Invalid lower bound");
-  Type* lower = _lower_bound->canonicalize(ctx, ctx_depth);
-  Type* upper = NULL;
-
-  if (_upper_bound == _lower_bound) {
-    upper = lower;
-  } else if (_upper_bound != NULL) {
-    upper = _upper_bound->canonicalize(ctx, ctx_depth);
-  }
-
-  return new TypeArgument(lower, upper);
-}
-
-#ifndef PRODUCT
-void TypeArgument::print_on(outputStream* str) const {
-  str->indent().print_cr("TypeArgument {");
-  {
-    streamIndentor si(str);
-    if (_lower_bound != NULL) {
-      str->indent().print("Lower bound: ");
-      _lower_bound->print_on(str);
-    }
-    if (_upper_bound != NULL) {
-      str->indent().print("Upper bound: ");
-      _upper_bound->print_on(str);
-    }
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-void Context::Mark::destroy() {
-  if (is_active()) {
-    _context->reset_to_mark(_marked_size);
-  }
-  deactivate();
-}
-
-void Context::apply_type_arguments(
-    InstanceKlass* current, InstanceKlass* super, TRAPS) {
-  assert(_cache != NULL, "Cannot use an empty context");
-  ClassType* spec = NULL;
-  if (current != NULL) {
-    ClassDescriptor* descriptor = _cache->descriptor_for(current, CHECK);
-    if (super == current->super()) {
-      spec = descriptor->super();
-    } else {
-      spec = descriptor->interface_desc(super->name());
-    }
-    if (spec != NULL) {
-      _type_arguments.push(spec);
-    }
-  }
-}
-
-void Context::reset_to_mark(int size) {
-  _type_arguments.trunc_to(size);
-}
-
-ClassType* Context::at_depth(int i) const {
-  if (i < _type_arguments.length()) {
-    return _type_arguments.at(_type_arguments.length() - 1 - i);
-  }
-  return NULL;
-}
-
-#ifndef PRODUCT
-void Context::print_on(outputStream* str) const {
-  str->indent().print_cr("Context {");
-  for (int i = 0; i < _type_arguments.length(); ++i) {
-    streamIndentor si(str);
-    str->indent().print("level %d: ", i);
-    ClassType* ct = at_depth(i);
-    if (ct == NULL) {
-      str->print_cr("<empty>");
-      continue;
-    } else {
-      str->print_cr("{");
-    }
-
-    for (int j = 0; j < ct->type_arguments_length(); ++j) {
-      streamIndentor si(str);
-      TypeArgument* ta = ct->type_argument_at(j);
-      Type* bound = ta->lower_bound();
-      bound->print_on(str);
-    }
-    str->indent().print_cr("}");
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ClassDescriptor* DescriptorCache::descriptor_for(InstanceKlass* ik, TRAPS) {
-
-  ClassDescriptor** existing = _class_descriptors.get(ik);
-  if (existing == NULL) {
-    ClassDescriptor* cd = ClassDescriptor::parse_generic_signature(ik, CHECK_NULL);
-    _class_descriptors.put(ik, cd);
-    return cd;
-  } else {
-    return *existing;
-  }
-}
-
-MethodDescriptor* DescriptorCache::descriptor_for(
-    Method* mh, ClassDescriptor* cd, TRAPS) {
-  assert(mh != NULL && cd != NULL, "Should not be NULL");
-  MethodDescriptor** existing = _method_descriptors.get(mh);
-  if (existing == NULL) {
-    MethodDescriptor* md = MethodDescriptor::parse_generic_signature(mh, cd);
-    _method_descriptors.put(mh, md);
-    return md;
-  } else {
-    return *existing;
-  }
-}
-MethodDescriptor* DescriptorCache::descriptor_for(Method* mh, TRAPS) {
-  ClassDescriptor* cd = descriptor_for(
-      InstanceKlass::cast(mh->method_holder()), CHECK_NULL);
-  return descriptor_for(mh, cd, THREAD);
-}
-
-} // namespace generic
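
For context on the grammar the deleted parser implemented: a field-type signature is an 'L'-prefixed class type with optional <...> type arguments, a 'T'-prefixed type variable, a '['-prefixed array, or a single primitive character, with '*', '+' and '-' marking wildcard variance inside argument lists. A minimal standalone sketch (plain C++, not HotSpot code; inner-class '.' chaining is omitted) that mirrors the dispatch in Type::parse_generic_signature:

#include <cstdio>
#include <cstring>

static const char* parse_type(const char* s);

// After the leading 'L': simple name, optional <args>, terminating ';'.
static const char* parse_class(const char* s) {
  while (*s && *s != ';' && *s != '<') ++s;
  if (*s == '<') {
    ++s;
    while (*s != '>') {
      if (*s == '*') { ++s; continue; }      // unbounded wildcard
      if (*s == '+' || *s == '-') ++s;       // variance marker
      s = parse_type(s);
    }
    ++s;                                     // skip '>'
  }
  return (*s == ';') ? s + 1 : s;
}

static const char* parse_type(const char* s) {
  switch (*s) {
    case 'L': return parse_class(s + 1);
    case 'T': while (*s && *s != ';') ++s; return s + 1;  // type variable
    case '[': return parse_type(s + 1);                   // array component
    default:  return s + 1;                  // primitive, including 'V'
  }
}

int main() {
  const char* sig = "Ljava/util/Map<TK;+Ljava/lang/Number;>;";
  const char* end = parse_type(sig);
  std::printf("consumed %d of %d chars\n", (int)(end - sig), (int)std::strlen(sig));
  return 0;
}

Feeding it the sample signature consumes the whole string, matching the dispatch the deleted Type::parse_generic_signature performed on 'L', 'T', '[' and primitive characters.
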
--- a/hotspot/src/share/vm/classfile/genericSignatures.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,467 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-#define SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-
-#include "classfile/symbolTable.hpp"
-#include "memory/allocation.hpp"
-#include "runtime/signature.hpp"
-#include "utilities/growableArray.hpp"
-#include "utilities/resourceHash.hpp"
-
-class stringStream;
-
-namespace generic {
-
-class Identifier;
-class ClassDescriptor;
-class MethodDescriptor;
-
-class TypeParameter; // a formal type parameter declared in generic signatures
-class TypeArgument;  // The "type value" passed to fill parameters in supertypes
-class TypeVariable;  // A usage of a type parameter as a value
-/**
- * Example:
- *
- * <T, V> class Foo extends Bar<String> { int m(V v) {} }
- * ^^^^^^                       ^^^^^^          ^^
- * type parameters            type argument    type variable
- *
- * Note that a type variable could be passed as an argument too:
- * <T, V> class Foo extends Bar<T> { int m(V v) {} }
- *                             ^^^
- *                             type argument's value is a type variable
- */
-
-
-class Type;
-class ClassType;
-class ArrayType;
-class PrimitiveType;
-class Context;
-class DescriptorCache;
-
-class DescriptorStream;
-
-class Identifier : public ResourceObj {
- private:
-  Symbol* _sym;
-  int _begin;
-  int _end;
-
- public:
-  Identifier(Symbol* sym, int begin, int end) :
-    _sym(sym), _begin(begin), _end(end) {}
-
-  bool equals(Identifier* other);
-  bool equals(Symbol* sym);
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif // ndef PRODUCT
-};
-
-class Descriptor : public ResourceObj {
- protected:
-  GrowableArray<TypeParameter*> _type_parameters;
-  ClassDescriptor* _outer_class;
-
-  Descriptor(GrowableArray<TypeParameter*>& params,
-    ClassDescriptor* outer)
-    : _type_parameters(params), _outer_class(outer) {}
-
- public:
-
-  ClassDescriptor* outer_class() { return _outer_class; }
-  void set_outer_class(ClassDescriptor* sig) { _outer_class = sig; }
-
-  virtual ClassDescriptor* as_class_signature() { return NULL; }
-  virtual MethodDescriptor* as_method_signature() { return NULL; }
-
-  bool is_class_signature() { return as_class_signature() != NULL; }
-  bool is_method_signature() { return as_method_signature() != NULL; }
-
-  GrowableArray<TypeParameter*>& type_parameters() {
-    return _type_parameters;
-  }
-
-  TypeParameter* find_type_parameter(Identifier* id, int* param_depth);
-
-  virtual void bind_variables_to_parameters() = 0;
-
-#ifndef PRODUCT
-  virtual void print_on(outputStream* str) const = 0;
-#endif
-};
-
-class ClassDescriptor : public Descriptor {
- private:
-  ClassType* _super;
-  GrowableArray<ClassType*> _interfaces;
-  MethodDescriptor* _outer_method;
-
-  ClassDescriptor(GrowableArray<TypeParameter*>& ftp, ClassType* scs,
-      GrowableArray<ClassType*>& sis, ClassDescriptor* outer_class = NULL,
-      MethodDescriptor* outer_method = NULL)
-        : Descriptor(ftp, outer_class), _super(scs), _interfaces(sis),
-          _outer_method(outer_method) {}
-
-  static u2 get_outer_class_index(InstanceKlass* k, TRAPS);
-  static ClassDescriptor* parse_generic_signature(Klass* k, Symbol* original_name, TRAPS);
-
- public:
-
-  virtual ClassDescriptor* as_class_signature() { return this; }
-
-  MethodDescriptor* outer_method() { return _outer_method; }
-  void set_outer_method(MethodDescriptor* m) { _outer_method = m; }
-
-  ClassType* super() { return _super; }
-  ClassType* interface_desc(Symbol* sym);
-
-  static ClassDescriptor* parse_generic_signature(Klass* k, TRAPS);
-  static ClassDescriptor* parse_generic_signature(Symbol* sym);
-
-  // For use in superclass chains in positions where there is no generic info
-  static ClassDescriptor* placeholder(InstanceKlass* klass);
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif
-
-  ClassDescriptor* canonicalize(Context* ctx);
-
-  // Linking sets the position index in any contained TypeVariable type
-  // to correspond to the location of that identifier in the formal type
-  // parameters.
-  void bind_variables_to_parameters();
-};
-
-class MethodDescriptor : public Descriptor {
- private:
-  GrowableArray<Type*> _parameters;
-  Type* _return_type;
-  GrowableArray<Type*> _throws;
-
-  MethodDescriptor(GrowableArray<TypeParameter*>& ftp, ClassDescriptor* outer,
-      GrowableArray<Type*>& sigs, Type* rt, GrowableArray<Type*>& throws)
-      : Descriptor(ftp, outer), _parameters(sigs), _return_type(rt),
-        _throws(throws) {}
-
- public:
-
-  static MethodDescriptor* parse_generic_signature(Method* m, ClassDescriptor* outer);
-  static MethodDescriptor* parse_generic_signature(Symbol* sym, ClassDescriptor* outer);
-
-  MethodDescriptor* as_method_signature() { return this; }
-
-  // Performs generic analysis on the method parameters to determine
-  // if both methods refer to the same argument types.
-  bool covariant_match(MethodDescriptor* other, Context* ctx);
-
-  // Returns a new method descriptor with all generic variables
-  // removed and replaced with whatever is indicated using the Context.
-  MethodDescriptor* canonicalize(Context* ctx);
-
-  void bind_variables_to_parameters();
-
-#ifndef PRODUCT
-  TempNewSymbol reify_signature(Context* ctx, TRAPS);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeParameter : public ResourceObj {
- private:
-  Identifier* _identifier;
-  ClassType* _class_bound;
-  GrowableArray<ClassType*> _interface_bounds;
-
-  // The position is the ordinal location of the parameter within the
-  // formal parameter list (excluding outer classes).  It is only set for
-  // formal type parameters that are associated with a class -- method
-  // type parameters are left as -1.  When resolving a generic variable to
-  // find the actual type, this index is used to access the generic type
-  // argument in the provided context object.
-  int _position; // Assigned during variable linking
-
-  TypeParameter(Identifier* id, ClassType* class_bound,
-    GrowableArray<ClassType*>& interface_bounds) :
-      _identifier(id), _class_bound(class_bound),
-      _interface_bounds(interface_bounds), _position(-1) {}
-
- public:
-  static TypeParameter* parse_generic_signature(DescriptorStream* str);
-
-  ClassType* bound();
-  int position() { return _position; }
-
-  void bind_variables_to_parameters(Descriptor* sig, int position);
-  Identifier* identifier() { return _identifier; }
-
-  Type* resolve(Context* ctx, int inner_depth, int ctx_depth);
-  TypeParameter* canonicalize(Context* ctx, int ctx_depth);
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class Type : public ResourceObj {
- public:
-  static Type* parse_generic_signature(DescriptorStream* str);
-
-  virtual ClassType* as_class() { return NULL; }
-  virtual TypeVariable* as_variable() { return NULL; }
-  virtual ArrayType* as_array() { return NULL; }
-  virtual PrimitiveType* as_primitive() { return NULL; }
-
-  virtual bool covariant_match(Type* gt, Context* ctx) = 0;
-  virtual Type* canonicalize(Context* ctx, int ctx_depth) = 0;
-
-  virtual void bind_variables_to_parameters(Descriptor* sig) = 0;
-
-#ifndef PRODUCT
-  virtual void reify_signature(stringStream* ss, Context* ctx) = 0;
-  virtual void print_on(outputStream* str) const = 0;
-#endif
-};
-
-class ClassType : public Type {
-  friend class ClassDescriptor;
- protected:
-  Identifier* _identifier;
-  GrowableArray<TypeArgument*> _type_arguments;
-  ClassType* _outer_class;
-
-  ClassType(Identifier* identifier,
-      GrowableArray<TypeArgument*>& args,
-      ClassType* outer)
-      : _identifier(identifier), _type_arguments(args), _outer_class(outer) {}
-
-  // Sets *has_inner to true when inner classes follow the parsed name
-  static Identifier* parse_generic_signature_simple(
-      GrowableArray<TypeArgument*>* args,
-      bool* has_inner, DescriptorStream* str);
-
-  static ClassType* parse_generic_signature(ClassType* outer,
-      DescriptorStream* str);
-  static ClassType* from_symbol(Symbol* sym);
-
- public:
-  ClassType* as_class() { return this; }
-
-  static ClassType* parse_generic_signature(DescriptorStream* str);
-  static ClassType* java_lang_Object();
-
-  Identifier* identifier() { return _identifier; }
-  int type_arguments_length() { return _type_arguments.length(); }
-  TypeArgument* type_argument_at(int i);
-
-  virtual ClassType* outer_class() { return _outer_class; }
-
-  bool covariant_match(Type* gt, Context* ctx);
-  ClassType* canonicalize(Context* ctx, int context_depth);
-
-  void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
-  void reify_signature(stringStream* ss, Context* ctx);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeVariable : public Type {
- private:
-  Identifier* _id;
-  TypeParameter* _parameter; // assigned during linking
-
-  // how many steps "out" from inner classes, -1 if method
-  int _inner_depth;
-
-  TypeVariable(Identifier* id)
-      : _id(id), _parameter(NULL), _inner_depth(0) {}
-
- public:
-  TypeVariable* as_variable() { return this; }
-
-  static TypeVariable* parse_generic_signature(DescriptorStream* str);
-
-  Identifier* identifier() { return _id; }
-  TypeParameter* parameter() { return _parameter; }
-  int inner_depth() { return _inner_depth; }
-
-  void bind_variables_to_parameters(Descriptor* sig);
-
-  Type* resolve(Context* ctx, int ctx_depth);
-  bool covariant_match(Type* gt, Context* ctx);
-  Type* canonicalize(Context* ctx, int ctx_depth);
-
-#ifndef PRODUCT
-  void reify_signature(stringStream* ss, Context* ctx);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class ArrayType : public Type {
- private:
-  Type* _base;
-
-  ArrayType(Type* base) : _base(base) {}
-
- public:
-  ArrayType* as_array() { return this; }
-
-  static ArrayType* parse_generic_signature(DescriptorStream* str);
-
-  bool covariant_match(Type* gt, Context* ctx);
-  ArrayType* canonicalize(Context* ctx, int ctx_depth);
-
-  void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
-  void reify_signature(stringStream* ss, Context* ctx);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class PrimitiveType : public Type {
-  friend class Type;
- private:
-  char _type; // includes V for void
-
-  PrimitiveType(char& type) : _type(type) {}
-
- public:
-  PrimitiveType* as_primitive() { return this; }
-
-  bool covariant_match(Type* gt, Context* ctx);
-  PrimitiveType* canonicalize(Context* ctx, int ctx_depth);
-
-  void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
-  void reify_signature(stringStream* ss, Context* ctx);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeArgument : public ResourceObj {
- private:
-  Type* _lower_bound;
-  Type* _upper_bound; // may be null or == _lower_bound
-
-  TypeArgument(Type* lower_bound, Type* upper_bound)
-      : _lower_bound(lower_bound), _upper_bound(upper_bound) {}
-
- public:
-
-  static TypeArgument* parse_generic_signature(DescriptorStream* str);
-
-  Type* lower_bound() { return _lower_bound; }
-  Type* upper_bound() { return _upper_bound; }
-
-  void bind_variables_to_parameters(Descriptor* sig);
-  TypeArgument* canonicalize(Context* ctx, int ctx_depth);
-
-  bool covariant_match(TypeArgument* a, Context* ctx);
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif
-};
-
-
-class Context : public ResourceObj {
- private:
-  DescriptorCache* _cache;
-  GrowableArray<ClassType*> _type_arguments;
-
-  void reset_to_mark(int size);
-
- public:
-  // When this object goes out of scope or 'destroy' is
-  // called, the application of the type to the context
-  // is wound back (unless the mark has been deactivated).
-  class Mark : public StackObj {
-   private:
-    mutable Context* _context;
-    int _marked_size;
-
-    bool is_active() const { return _context != NULL; }
-    void deactivate() const { _context = NULL; }
-
-   public:
-    Mark() : _context(NULL), _marked_size(0) {}
-    Mark(Context* ctx, int sz) : _context(ctx), _marked_size(sz) {}
-    Mark(const Mark& m) : _context(m._context), _marked_size(m._marked_size) {
-      m.deactivate(); // Ownership is transferred
-    }
-
-    Mark& operator=(const Mark& cm) {
-      destroy();
-      _context = cm._context;
-      _marked_size = cm._marked_size;
-      cm.deactivate();
-      return *this;
-    }
-
-    void destroy();
-    ~Mark() { destroy(); }
-  };
-
-  Context(DescriptorCache* cache) : _cache(cache) {}
-
-  Mark mark() { return Mark(this, _type_arguments.length()); }
-  void apply_type_arguments(InstanceKlass* current, InstanceKlass* super,TRAPS);
-
-  ClassType* at_depth(int i) const;
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif
-};
-
-/**
- * Contains a cache of descriptors for classes and methods so they can be
- * looked-up instead of reparsing each time they are needed.
- */
-class DescriptorCache : public ResourceObj {
- private:
-  ResourceHashtable<InstanceKlass*, ClassDescriptor*> _class_descriptors;
-  ResourceHashtable<Method*, MethodDescriptor*> _method_descriptors;
-
- public:
-  ClassDescriptor* descriptor_for(InstanceKlass* ikh, TRAPS);
-
-  MethodDescriptor* descriptor_for(Method* mh, ClassDescriptor* cd, TRAPS);
-  // Class descriptor derived from method holder
-  MethodDescriptor* descriptor_for(Method* mh, TRAPS);
-};
-
-} // namespace generic
-
-#endif // SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-
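
The Context::Mark class removed above is an auto_ptr-style scope guard: copying or assigning a Mark transfers ownership, so the rollback in destroy() runs exactly once even when marks are returned by value. A minimal sketch of that idiom (a simplified stand-in, not HotSpot code; the assignment operator is omitted for brevity):

#include <cstdio>

class Rollback {
  mutable int* _state;   // non-NULL while this copy owns the rollback
  int          _saved;
 public:
  explicit Rollback(int* state) : _state(state), _saved(*state) {}
  Rollback(const Rollback& other) : _state(other._state), _saved(other._saved) {
    other._state = NULL;                 // ownership transferred to this copy
  }
  ~Rollback() { if (_state != NULL) *_state = _saved; }
};

int main() {
  int depth = 0;
  {
    Rollback guard(&depth);
    depth = 3;                           // apply nested state
  }                                      // guard rewinds it here
  std::printf("depth after scope: %d\n", depth);   // prints 0
  return 0;
}
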
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -188,6 +188,10 @@
 bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
   Symbol* name = klass->name();
   Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
+  Klass* lambda_magic_klass = SystemDictionary::lambda_MagicLambdaImpl_klass();
+
+  bool is_reflect = refl_magic_klass != NULL && klass->is_subtype_of(refl_magic_klass);
+  bool is_lambda = lambda_magic_klass != NULL && klass->is_subtype_of(lambda_magic_klass);
 
   return (should_verify_for(klass->class_loader(), should_verify_class) &&
     // return if the class is a bootstrapping class
@@ -210,9 +214,9 @@
     // sun/reflect/SerializationConstructorAccessor.
     // NOTE: this is called too early in the bootstrapping process to be
     // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
-    (refl_magic_klass == NULL ||
-     !klass->is_subtype_of(refl_magic_klass) ||
-     VerifyReflectionBytecodes)
+    // Also exempt lambda-generated code (jdk8 and later)
+    (!is_reflect || VerifyReflectionBytecodes) &&
+    (!is_lambda || VerifyLambdaBytecodes)
   );
 }
 
@@ -2318,9 +2322,6 @@
       types = 1 << JVM_CONSTANT_InvokeDynamic;
       break;
     case Bytecodes::_invokespecial:
-      types = (1 << JVM_CONSTANT_InterfaceMethodref) |
-              (1 << JVM_CONSTANT_Methodref);
-      break;
     case Bytecodes::_invokestatic:
       types = (_klass->major_version() < STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION) ?
         (1 << JVM_CONSTANT_Methodref) :
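
The second verifier hunk folds Bytecodes::_invokespecial into the _invokestatic case, so both bytecodes now accept an InterfaceMethodref constant only for class files at or above STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION. A hedged sketch of the resulting selection (the threshold value 52 / JDK 8 and the constant-pool tag values 10 and 11 are assumptions drawn from contemporaneous sources, not shown in this hunk):

#include <cstdio>

enum { CP_Methodref = 10, CP_InterfaceMethodref = 11 };  // JVM cp tag values

static unsigned invoke_cp_types(int class_file_major_version) {
  return (class_file_major_version < 52)                 // assumed threshold
      ? (1u << CP_Methodref)
      : (1u << CP_Methodref) | (1u << CP_InterfaceMethodref);
}

int main() {
  std::printf("jdk7 class file: %#x\n", invoke_cp_types(51));  // Methodref only
  std::printf("jdk8 class file: %#x\n", invoke_cp_types(52));  // adds InterfaceMethodref
  return 0;
}
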
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -245,7 +245,7 @@
 }
 
 
-void* BufferBlob::operator new(size_t s, unsigned size) {
+void* BufferBlob::operator new(size_t s, unsigned size) throw() {
   void* p = CodeCache::allocate(size);
   return p;
 }
@@ -347,14 +347,14 @@
 }
 
 
-void* RuntimeStub::operator new(size_t s, unsigned size) {
+void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
   void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
 
 // operator new shared by all singletons:
-void* SingletonBlob::operator new(size_t s, unsigned size) {
+void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
   void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
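
The throw() annotations added here and throughout the rest of this changeset share one motivation: under C++98 rules, a non-nothrow allocation function is assumed never to return NULL, so the compiler may elide the NULL check before running the constructor. CodeCache::allocate can return NULL, so every such operator new must carry an empty exception specification. A minimal sketch of the rule (plain C++98, not HotSpot code):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct Widget {
  int x;
  Widget() : x(42) {}
  // This allocator can fail: it returns NULL rather than throwing
  // std::bad_alloc, so it is declared throw(); that obliges the compiler
  // to test for NULL before invoking the constructor.
  void* operator new(std::size_t size) throw() { return std::malloc(size); }
  void  operator delete(void* p) { std::free(p); }
};

int main() {
  Widget* w = new Widget();     // NULL-checked before Widget::Widget runs
  if (w == NULL) return 1;
  std::printf("%d\n", w->x);
  delete w;
  return 0;
}
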
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -209,7 +209,7 @@
   BufferBlob(const char* name, int size);
   BufferBlob(const char* name, int size, CodeBuffer* cb);
 
-  void* operator new(size_t s, unsigned size);
+  void* operator new(size_t s, unsigned size) throw();
 
  public:
   // Creation
@@ -283,7 +283,7 @@
     bool        caller_must_gc_arguments
   );
 
-  void* operator new(size_t s, unsigned size);
+  void* operator new(size_t s, unsigned size) throw();
 
  public:
   // Creation
@@ -321,7 +321,7 @@
   friend class VMStructs;
 
  protected:
-  void* operator new(size_t s, unsigned size);
+  void* operator new(size_t s, unsigned size) throw();
 
  public:
    SingletonBlob(
--- a/hotspot/src/share/vm/code/debugInfoRec.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/code/debugInfoRec.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
   int  _length; // number of bytes in the stream
   int  _hash;   // hash of stream bytes (for quicker reuse)
 
-  void* operator new(size_t ignore, DebugInformationRecorder* dir) {
+  void* operator new(size_t ignore, DebugInformationRecorder* dir) throw() {
     assert(ignore == sizeof(DIR_Chunk), "");
     if (dir->_next_chunk >= dir->_next_chunk_limit) {
       const int CHUNK = 100;
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,18 +93,21 @@
 #endif
 
 bool nmethod::is_compiled_by_c1() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_c1();
 }
 bool nmethod::is_compiled_by_c2() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_c2();
 }
 bool nmethod::is_compiled_by_shark() const {
-  if (is_native_method()) return false;
-  assert(compiler() != NULL, "must be");
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_shark();
 }
 
@@ -800,7 +803,7 @@
 }
 #endif // def HAVE_DTRACE_H
 
-void* nmethod::operator new(size_t size, int nmethod_size) throw () {
+void* nmethod::operator new(size_t size, int nmethod_size) throw() {
   // Not critical, may return null if there is too little contiguous memory
   return CodeCache::allocate(nmethod_size);
 }
@@ -1401,6 +1404,9 @@
     // nmethods aren't scanned for GC.
     _oops_are_stale = true;
 #endif
+    // the Method may be reclaimed by class unloading now that the
+    // nmethod is in zombie state
+    set_method(NULL);
   } else {
     assert(state == not_entrant, "other cases may need to be handled differently");
   }
--- a/hotspot/src/share/vm/code/nmethod.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -265,7 +265,7 @@
           int comp_level);
 
   // helper methods
-  void* operator new(size_t size, int nmethod_size);
+  void* operator new(size_t size, int nmethod_size) throw();
 
   const char* reloc_string_for(u_char* begin, u_char* end);
   // Returns true if this thread changed the state of the nmethod or
--- a/hotspot/src/share/vm/code/relocInfo.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/code/relocInfo.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -677,7 +677,7 @@
   }
 
  public:
-  void* operator new(size_t size, const RelocationHolder& holder) {
+  void* operator new(size_t size, const RelocationHolder& holder) throw() {
     if (size > sizeof(holder._relocbuf)) guarantee_size();
     assert((void* const *)holder.reloc() == &holder._relocbuf[0], "ptrs must agree");
     return holder.reloc();
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
 static int num_vtable_chunks = 0;
 
 
-void* VtableStub::operator new(size_t size, int code_size) {
+void* VtableStub::operator new(size_t size, int code_size) throw() {
   assert(size == sizeof(VtableStub), "mismatched size");
   num_vtable_chunks++;
   // compute real VtableStub size (rounded to nearest word)
--- a/hotspot/src/share/vm/code/vtableStubs.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/code/vtableStubs.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@
   bool           _is_vtable_stub;    // True if vtable stub, false if itable stub
   /* code follows here */            // The vtableStub code
 
-  void* operator new(size_t size, int code_size);
+  void* operator new(size_t size, int code_size) throw();
 
   VtableStub(bool is_vtable_stub, int index)
         : _next(NULL), _is_vtable_stub(is_vtable_stub),
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1718,7 +1718,7 @@
     CodeCache::print_summary(&s, detailed);
   }
   ttyLocker ttyl;
-  tty->print_cr(s.as_string());
+  tty->print(s.as_string());
 }
 
 // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -2493,11 +2493,11 @@
 
 void G1CollectedHeap::register_concurrent_cycle_end() {
   if (_concurrent_cycle_started) {
-    _gc_timer_cm->register_gc_end(os::elapsed_counter());
-
     if (_cm->has_aborted()) {
       _gc_tracer_cm->report_concurrent_mode_failure();
     }
+
+    _gc_timer_cm->register_gc_end(os::elapsed_counter());
     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 
     _concurrent_cycle_started = false;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -168,7 +168,15 @@
   // Set up the region size and associated fields. Given that the
   // policy is created before the heap, we have to set this up here,
   // so it's done as soon as possible.
-  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
+
+  // It would have been natural to pass initial_heap_byte_size() and
+  // max_heap_byte_size() to setup_heap_region_size() but those have
+  // not been set up at this point since they should be aligned with
+  // the region size. So, there is a circular dependency here. We base
+  // the region size on the heap size, but the heap size should be
+  // aligned with the region size. To get around this we use the
+  // unaligned values for the heap.
+  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   HeapRegionRemSet::setup_remset_size();
 
   G1ErgoVerbose::initialize();
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -149,18 +149,11 @@
 // many regions in the heap (based on the average heap size).
 #define TARGET_REGION_NUMBER          2048
 
-void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
-  // region_size in bytes
+void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
-    // We base the automatic calculation on the min heap size. This
-    // can be problematic if the spread between min and max is quite
-    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
-    // the max size, the region size might be way too large for the
-    // min size. Either way, some users might have to set the region
-    // size manually for some -Xms / -Xmx combos.
-
-    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
+    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
+    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                        (uintx) MIN_REGION_SIZE);
   }
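
A worked example of the new sizing rule, under assumed constants (TARGET_REGION_NUMBER is 2048 per the context above; the 1M MIN_REGION_SIZE floor and the later power-of-two rounding are assumptions about the surrounding code):

#include <cstdio>

int main() {
  const unsigned long long M = 1024ULL * 1024;
  unsigned long long initial = 128 * M;          // -Xms128m
  unsigned long long max     = 32 * 1024 * M;    // -Xmx32g
  unsigned long long target  = 2048;             // TARGET_REGION_NUMBER

  unsigned long long old_size = initial / target;      // min-based rule
  unsigned long long avg      = (initial + max) / 2;   // 16448M
  unsigned long long new_size = avg / target;          // average-based rule

  std::printf("old: %lluK (clamped up to the assumed 1M floor)\n", old_size / 1024);
  std::printf("new: %lluM (before power-of-two rounding)\n", new_size / M);
  return 0;
}

For -Xms128m -Xmx32g the old min-based rule degenerated to the floor (64K, clamped to 1M), while the average-based rule lands near 8M, which is the kind of spread the deleted comment worried about.
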
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -361,7 +361,7 @@
   // CardsPerRegion). All those fields are considered constant
   // throughout the JVM's execution, therefore they should only be set
   // up once during initialization time.
-  static void setup_heap_region_size(uintx min_heap_size);
+  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
 
   enum ClaimValues {
     InitialClaimValue          = 0,
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "runtime/os.hpp"
 #include "trace/tracing.hpp"
 #include "trace/traceBackend.hpp"
 #if INCLUDE_ALL_GCS
@@ -54,11 +55,12 @@
 }
 
 void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
-  EventGCReferenceStatistics e;
+  EventGCReferenceStatistics e(UNTIMED);
   if (e.should_commit()) {
       e.set_gcId(_shared_gc_info.id());
       e.set_type((u1)type);
       e.set_count(count);
+      e.set_endtime(os::elapsed_counter());
       e.commit();
   }
 }
@@ -105,20 +107,22 @@
 }
 
 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
-  EventPromotionFailed e;
+  EventPromotionFailed e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_data(to_trace_struct(pf_info));
     e.set_thread(pf_info.thread()->thread_id());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
 
 // Common to CMS and G1
 void OldGCTracer::send_concurrent_mode_failure_event() {
-  EventConcurrentModeFailure e;
+  EventConcurrentModeFailure e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -136,7 +140,7 @@
 }
 
 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
-  EventEvacuationInfo e;
+  EventEvacuationInfo e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_cSetRegions(info->collectionset_regions());
@@ -147,15 +151,17 @@
     e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
     e.set_bytesCopied(info->bytes_copied());
     e.set_regionsFreed(info->regions_freed());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
 
 void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
-  EventEvacuationFailed e;
+  EventEvacuationFailed e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_data(to_trace_struct(ef_info));
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -189,12 +195,13 @@
   void visit(const GCHeapSummary* heap_summary) const {
     const VirtualSpaceSummary& heap_space = heap_summary->heap();
 
-    EventGCHeapSummary e;
+    EventGCHeapSummary e(UNTIMED);
     if (e.should_commit()) {
       e.set_gcId(_id);
       e.set_when((u1)_when);
       e.set_heapSpace(to_trace_struct(heap_space));
       e.set_heapUsed(heap_summary->used());
+      e.set_endtime(os::elapsed_counter());
       e.commit();
     }
   }
@@ -209,7 +216,7 @@
     const SpaceSummary& from_space = ps_heap_summary->from();
     const SpaceSummary& to_space = ps_heap_summary->to();
 
-    EventPSHeapSummary e;
+    EventPSHeapSummary e(UNTIMED);
     if (e.should_commit()) {
       e.set_gcId(_id);
       e.set_when((u1)_when);
@@ -220,6 +227,7 @@
       e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
       e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
       e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
+      e.set_endtime(os::elapsed_counter());
       e.commit();
     }
   }
@@ -241,13 +249,14 @@
 }
 
 void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
-  EventMetaspaceSummary e;
+  EventMetaspaceSummary e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_when((u1) when);
     e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
     e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
     e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -282,8 +291,6 @@
       default: /* Ignore sending this phase */ break;
     }
   }
-
-#undef send_phase
 };
 
 void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
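
Every gcTraceSend change in this hunk applies one pattern: construct the event UNTIMED, fill in the payload, then stamp the end time explicitly with os::elapsed_counter() once the data is final, so the recorded timestamp reflects the GC data rather than the moment the event object was constructed. A standalone analog (not the real tracing API):

#include <cstdio>
#include <ctime>

struct Event {
  long _end;
  explicit Event(bool untimed) : _end(0) {
    if (!untimed) _end = static_cast<long>(std::clock());  // timed: stamp now
  }
  void set_endtime(long t) { _end = t; }
  void commit() { std::printf("event committed, end=%ld\n", _end); }
};

int main() {
  Event e(true);                                   // UNTIMED, as in the hunk
  // ... gather payload fields here ...
  e.set_endtime(static_cast<long>(std::clock()));  // stamp when data is final
  e.commit();
  return 0;
}
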
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,9 +144,9 @@
     _padded_avg(0.0), _deviation(0.0), _padding(padding) {}
 
   // Placement support
-  void* operator new(size_t ignored, void* p) { return p; }
+  void* operator new(size_t ignored, void* p) throw() { return p; }
   // Allocator
-  void* operator new(size_t size) { return CHeapObj<mtGC>::operator new(size); }
+  void* operator new(size_t size) throw() { return CHeapObj<mtGC>::operator new(size); }
 
   // Accessor
   float padded_average() const         { return _padded_avg; }
--- a/hotspot/src/share/vm/libadt/port.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/libadt/port.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -163,8 +163,8 @@
 extern void *safe_calloc (const char *file, unsigned line, unsigned nitems, unsigned size);
 extern void *safe_realloc(const char *file, unsigned line, void *ptr, unsigned size);
 extern char *safe_strdup (const char *file, unsigned line, const char *src);
-inline void *operator new( size_t size ) { return malloc(size); }
-inline void operator delete( void *ptr ) { free(ptr); }
+inline void *operator new( size_t size ) throw() { return malloc(size); }
+inline void operator delete( void *ptr )         { free(ptr); }
 #endif
 
 //-----------------------------------------------------------------------------
--- a/hotspot/src/share/vm/memory/allocation.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -49,19 +49,19 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-void* StackObj::operator new(size_t size)       { ShouldNotCallThis(); return 0; }
-void  StackObj::operator delete(void* p)        { ShouldNotCallThis(); }
-void* StackObj::operator new [](size_t size)    { ShouldNotCallThis(); return 0; }
-void  StackObj::operator delete [](void* p)     { ShouldNotCallThis(); }
+void* StackObj::operator new(size_t size)     throw() { ShouldNotCallThis(); return 0; }
+void  StackObj::operator delete(void* p)              { ShouldNotCallThis(); }
+void* StackObj::operator new [](size_t size)  throw() { ShouldNotCallThis(); return 0; }
+void  StackObj::operator delete [](void* p)           { ShouldNotCallThis(); }
 
-void* _ValueObj::operator new(size_t size)      { ShouldNotCallThis(); return 0; }
-void  _ValueObj::operator delete(void* p)       { ShouldNotCallThis(); }
-void* _ValueObj::operator new [](size_t size)   { ShouldNotCallThis(); return 0; }
-void  _ValueObj::operator delete [](void* p)    { ShouldNotCallThis(); }
+void* _ValueObj::operator new(size_t size)    throw() { ShouldNotCallThis(); return 0; }
+void  _ValueObj::operator delete(void* p)             { ShouldNotCallThis(); }
+void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
+void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }
 
 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                  size_t word_size, bool read_only,
-                                 MetaspaceObj::Type type, TRAPS) {
+                                 MetaspaceObj::Type type, TRAPS) throw() {
   // Klass has its own operator new
   return Metaspace::allocate(loader_data, word_size, read_only,
                              type, CHECK_NULL);
@@ -80,7 +80,7 @@
   st->print(" {"INTPTR_FORMAT"}", this);
 }
 
-void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
+void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
   address res;
   switch (type) {
    case C_HEAP:
@@ -97,12 +97,12 @@
   return res;
 }
 
-void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) {
+void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
   return (address) operator new(size, type, flags);
 }
 
 void* ResourceObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
-    allocation_type type, MEMFLAGS flags) {
+    allocation_type type, MEMFLAGS flags) throw() {
   // Should only be called with std::nothrow; use the other operator new() otherwise
   address res;
   switch (type) {
@@ -121,7 +121,7 @@
 }
 
 void* ResourceObj::operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-    allocation_type type, MEMFLAGS flags) {
+    allocation_type type, MEMFLAGS flags) throw() {
   return (address)operator new(size, nothrow_constant, type, flags);
 }
 
@@ -370,7 +370,7 @@
 //--------------------------------------------------------------------------------------
 // Chunk implementation
 
-void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
+void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
   // requested_size is equal to sizeof(Chunk) but in order for the arena
   // allocations to come out aligned as expected the size must be aligned
   // to expected arena alignment.
@@ -478,18 +478,18 @@
   NOT_PRODUCT(Atomic::dec(&_instance_count);)
 }
 
-void* Arena::operator new(size_t size) {
+void* Arena::operator new(size_t size) throw() {
   assert(false, "Use dynamic memory type binding");
   return NULL;
 }
 
-void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
+void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) throw() {
   assert(false, "Use dynamic memory type binding");
   return NULL;
 }
 
   // dynamic memory type binding
-void* Arena::operator new(size_t size, MEMFLAGS flags) {
+void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
 #ifdef ASSERT
   void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
@@ -499,7 +499,7 @@
 #endif
 }
 
-void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
+void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
 #ifdef ASSERT
   void* p = os::malloc(size, flags|otArena, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
@@ -688,22 +688,22 @@
 // define ALLOW_OPERATOR_NEW_USAGE for platform on which global operator new allowed.
 //
 #ifndef ALLOW_OPERATOR_NEW_USAGE
-void* operator new(size_t size){
+void* operator new(size_t size) throw() {
   assert(false, "Should not call global operator new");
   return 0;
 }
 
-void* operator new [](size_t size){
+void* operator new [](size_t size) throw() {
   assert(false, "Should not call global operator new[]");
   return 0;
 }
 
-void* operator new(size_t size, const std::nothrow_t&  nothrow_constant){
+void* operator new(size_t size, const std::nothrow_t&  nothrow_constant) throw() {
   assert(false, "Should not call global operator new");
   return 0;
 }
 
-void* operator new [](size_t size, std::nothrow_t&  nothrow_constant){
+void* operator new [](size_t size, std::nothrow_t&  nothrow_constant) throw() {
   assert(false, "Should not call global operator new[]");
   return 0;
 }
--- a/hotspot/src/share/vm/memory/allocation.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -204,12 +204,12 @@
 
 template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
+  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
   _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0);
-  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0);
+                               address caller_pc = 0) throw();
+  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
   _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0);
+                               address caller_pc = 0) throw();
   void  operator delete(void* p);
   void  operator delete [] (void* p);
 };
@@ -219,9 +219,9 @@
 
 class StackObj ALLOCATION_SUPER_CLASS_SPEC {
  private:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void  operator delete(void* p);
-  void* operator new [](size_t size);
+  void* operator new [](size_t size) throw();
   void  operator delete [](void* p);
 };
 
@@ -245,9 +245,9 @@
 //
 class _ValueObj {
  private:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void  operator delete(void* p);
-  void* operator new [](size_t size);
+  void* operator new [](size_t size) throw();
   void  operator delete [](void* p);
 };
 
@@ -316,7 +316,7 @@
 
   void* operator new(size_t size, ClassLoaderData* loader_data,
                      size_t word_size, bool read_only,
-                     Type type, Thread* thread);
+                     Type type, Thread* thread) throw();
                      // can't use TRAPS from this header file.
   void operator delete(void* p) { ShouldNotCallThis(); }
 };
@@ -339,7 +339,7 @@
   Chunk*       _next;     // Next Chunk in list
   const size_t _len;      // Size of this Chunk
  public:
-  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
+  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
   void  operator delete(void* p);
   Chunk(size_t length);
 
@@ -422,12 +422,12 @@
   char* hwm() const             { return _hwm; }
 
   // new operators
-  void* operator new (size_t size);
-  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);
+  void* operator new (size_t size) throw();
+  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();
 
   // dynamic memory type tagging
-  void* operator new(size_t size, MEMFLAGS flags);
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
+  void* operator new(size_t size, MEMFLAGS flags) throw();
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
   void  operator delete(void* p);
 
   // Fast allocate in the arena.  Common case is: pointer test + increment.
@@ -583,44 +583,44 @@
 #endif // ASSERT
 
  public:
-  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
-  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags);
+  void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
+  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
   void* operator new(size_t size, const std::nothrow_t&  nothrow_constant,
-      allocation_type type, MEMFLAGS flags);
+      allocation_type type, MEMFLAGS flags) throw();
   void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-      allocation_type type, MEMFLAGS flags);
+      allocation_type type, MEMFLAGS flags) throw();
 
-  void* operator new(size_t size, Arena *arena) {
+  void* operator new(size_t size, Arena *arena) throw() {
       address res = (address)arena->Amalloc(size);
       DEBUG_ONLY(set_allocation_type(res, ARENA);)
       return res;
   }
 
-  void* operator new [](size_t size, Arena *arena) {
+  void* operator new [](size_t size, Arena *arena) throw() {
       address res = (address)arena->Amalloc(size);
       DEBUG_ONLY(set_allocation_type(res, ARENA);)
       return res;
   }
 
-  void* operator new(size_t size) {
+  void* operator new(size_t size) throw() {
       address res = (address)resource_allocate_bytes(size);
       DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
 
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
       address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
       DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
 
-  void* operator new [](size_t size) {
+  void* operator new [](size_t size) throw() {
       address res = (address)resource_allocate_bytes(size);
       DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
 
-  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) {
+  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
       address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
       DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
       return res;
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -85,7 +85,7 @@
 
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
-      address caller_pc){
+      address caller_pc) throw() {
     void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
 #ifdef ASSERT
     if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
@@ -94,7 +94,7 @@
   }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) {
+  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
   void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
       AllocFailStrategy::RETURN_NULL);
 #ifdef ASSERT
@@ -104,12 +104,12 @@
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-      address caller_pc){
+      address caller_pc) throw() {
     return CHeapObj<F>::operator new(size, caller_pc);
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) {
+  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
     return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
 }
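
A note on the 'throw()' additions in these allocation operators (and the matching ones below in memRegion, klass, symbol, callGenerator, and callnode): under C++98/03, an operator new with no exception specification is assumed to throw on failure, so a compiler may run the constructor without first checking the returned pointer; declaring 'throw()' gives the operator nothrow-new semantics, and the generated call site tests for NULL before construction. That matters here because several of these operators can legitimately return NULL, e.g. via AllocFailStrategy::RETURN_NULL. A minimal sketch of the effect, using a hypothetical 'Gadget' class that is not HotSpot code:

    #include <cstdlib>
    #include <new>

    // Hypothetical class, for illustration only.
    struct Gadget {
      int value;
      Gadget() : value(0) {}
      // Declared throw(): the call site must check for NULL before the
      // constructor runs, matching nothrow-new semantics.
      void* operator new(std::size_t size) throw() { return std::malloc(size); }
      void  operator delete(void* p)               { std::free(p); }
    };

    int main() {
      Gadget* g = new Gadget();  // constructor is skipped if allocation fails
      if (g == NULL) return 1;   // safe: g can legitimately be NULL here
      delete g;
      return 0;
    }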
 
--- a/hotspot/src/share/vm/memory/filemap.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -55,6 +55,7 @@
               " shared archive file.\n");
   jio_vfprintf(defaultStream::error_stream(), msg, ap);
   jio_fprintf(defaultStream::error_stream(), "\n");
+  // Do not change the text of the message below because some tests check for it.
   vm_exit_during_initialization("Unable to use shared archive.", NULL);
 }
 
--- a/hotspot/src/share/vm/memory/memRegion.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/memory/memRegion.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,11 +102,11 @@
   return MemRegion();
 }
 
-void* MemRegion::operator new(size_t size) {
+void* MemRegion::operator new(size_t size) throw() {
   return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
 }
 
-void* MemRegion::operator new [](size_t size) {
+void* MemRegion::operator new [](size_t size) throw() {
   return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
 }
 void  MemRegion::operator delete(void* p) {
--- a/hotspot/src/share/vm/memory/memRegion.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/memory/memRegion.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,8 +94,8 @@
   size_t word_size() const { return _word_size; }
 
   bool is_empty() const { return word_size() == 0; }
-  void* operator new(size_t size);
-  void* operator new [](size_t size);
+  void* operator new(size_t size) throw();
+  void* operator new [](size_t size) throw();
   void  operator delete(void* p);
   void  operator delete [](void* p);
 };
@@ -111,13 +111,13 @@
 
 class MemRegionClosureRO: public MemRegionClosure {
 public:
-  void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) {
+  void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) throw() {
         return ResourceObj::operator new(size, type, flags);
   }
-  void* operator new(size_t size, Arena *arena) {
+  void* operator new(size_t size, Arena *arena) throw() {
         return ResourceObj::operator new(size, arena);
   }
-  void* operator new(size_t size) {
+  void* operator new(size_t size) throw() {
         return ResourceObj::operator new(size);
   }
 
--- a/hotspot/src/share/vm/oops/klass.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -139,7 +139,7 @@
   return NULL;
 }
 
-void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) {
+void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
   return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
                              MetaspaceObj::ClassType, CHECK_NULL);
 }
--- a/hotspot/src/share/vm/oops/klass.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -179,7 +179,7 @@
   // Constructor
   Klass();
 
-  void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS);
+  void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw();
 
  public:
   bool is_klass() const volatile { return true; }
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -720,11 +720,22 @@
   }
 }
 
+bool Method::is_always_compilable() const {
+  // Generated adapters must be compiled
+  if (is_method_handle_intrinsic() && is_synthetic()) {
+    assert(!is_not_c1_compilable(), "sanity check");
+    assert(!is_not_c2_compilable(), "sanity check");
+    return true;
+  }
+
+  return false;
+}
+
 bool Method::is_not_compilable(int comp_level) const {
   if (number_of_breakpoints() > 0)
     return true;
-  if (is_method_handle_intrinsic())
-    return !is_synthetic();  // the generated adapters must be compiled
+  if (is_always_compilable())
+    return false;
   if (comp_level == CompLevel_any)
     return is_not_c1_compilable() || is_not_c2_compilable();
   if (is_c1_compile(comp_level))
@@ -736,6 +747,10 @@
 
 // call this when the compiler finds that this method is not compilable
 void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
+  if (is_always_compilable()) {
+    // Don't mark a method that should always be compilable
+    return;
+  }
   print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
   if (comp_level == CompLevel_all) {
     set_not_c1_compilable();
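
The new is_always_compilable() predicate captures in one place the rule the old code expressed inline: a synthetic method handle intrinsic (a generated adapter) must be compiled, so it must never be reported or recorded as not-compilable. Both is_not_compilable() and set_not_compilable() now consult it, so the two paths cannot disagree. A minimal sketch of that invariant, with illustrative names rather than the real Method API:

    #include <cstdio>

    struct Method {
      bool intrinsic;   // stands in for is_method_handle_intrinsic()
      bool synthetic;   // stands in for is_synthetic()
      bool banned;      // stands in for the is_not_c1/c2_compilable() flags

      bool is_always_compilable() const { return intrinsic && synthetic; }

      bool is_not_compilable() const {
        if (is_always_compilable()) return false;  // adapters must compile
        return banned;
      }

      void set_not_compilable() {
        if (is_always_compilable()) return;  // never record a ban for these
        banned = true;
      }
    };

    int main() {
      Method m = { true, true, false };
      m.set_not_compilable();                             // ignored by design
      std::printf("%d\n", m.is_not_compilable() ? 1 : 0); // prints 0
      return 0;
    }
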
--- a/hotspot/src/share/vm/oops/method.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -796,6 +796,7 @@
   void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_osr_compilable(comp_level, false);
   }
+  bool is_always_compilable() const;
 
  private:
   void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
--- a/hotspot/src/share/vm/oops/symbol.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/oops/symbol.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -41,19 +41,19 @@
   }
 }
 
-void* Symbol::operator new(size_t sz, int len, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, TRAPS) throw() {
   int alloc_size = size(len)*HeapWordSize;
   address res = (address) AllocateHeap(alloc_size, mtSymbol);
   return res;
 }
 
-void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) throw() {
   int alloc_size = size(len)*HeapWordSize;
   address res = (address)arena->Amalloc(alloc_size);
   return res;
 }
 
-void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) throw() {
   address res;
   int alloc_size = size(len)*HeapWordSize;
   res = (address) Metaspace::allocate(loader_data, size(len), true,
--- a/hotspot/src/share/vm/oops/symbol.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/oops/symbol.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -136,9 +136,9 @@
   }
 
   Symbol(const u1* name, int length, int refcount);
-  void* operator new(size_t size, int len, TRAPS);
-  void* operator new(size_t size, int len, Arena* arena, TRAPS);
-  void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS);
+  void* operator new(size_t size, int len, TRAPS) throw();
+  void* operator new(size_t size, int len, Arena* arena, TRAPS) throw();
+  void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS) throw();
 
   void  operator delete(void* p);
 
--- a/hotspot/src/share/vm/opto/block.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/block.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -112,9 +112,9 @@
 // exceeds OptoLoopAlignment.
 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                     PhaseRegAlloc* ra) {
-  uint last_inst = _nodes.size();
+  uint last_inst = number_of_nodes();
   for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
-    uint inst_size = _nodes[j]->size(ra);
+    uint inst_size = get_node(j)->size(ra);
     if( inst_size > 0 ) {
       inst_cnt--;
       uint sz = sum_size + inst_size;
@@ -131,8 +131,8 @@
 }
 
 uint Block::find_node( const Node *n ) const {
-  for( uint i = 0; i < _nodes.size(); i++ ) {
-    if( _nodes[i] == n )
+  for( uint i = 0; i < number_of_nodes(); i++ ) {
+    if( get_node(i) == n )
       return i;
   }
   ShouldNotReachHere();
@@ -141,7 +141,7 @@
 
 // Find and remove n from block list
 void Block::find_remove( const Node *n ) {
-  _nodes.remove(find_node(n));
+  remove_node(find_node(n));
 }
 
 // Return empty status of a block.  Empty blocks contain only the head, other
@@ -154,10 +154,10 @@
   }
 
   int success_result = completely_empty;
-  int end_idx = _nodes.size()-1;
+  int end_idx = number_of_nodes() - 1;
 
   // Check for ending goto
-  if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
+  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
     success_result = empty_with_goto;
     end_idx--;
   }
@@ -170,7 +170,7 @@
   // Ideal nodes are allowable in empty blocks: skip them.  Only MachNodes
   // turn directly into code, because only MachNodes have non-trivial
   // emit() functions.
-  while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
+  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
     end_idx--;
   }
 
@@ -209,15 +209,15 @@
 
 // True if block is low enough frequency or guarded by a test which
 // mostly does not go here.
-bool Block::is_uncommon(PhaseCFG* cfg) const {
+bool PhaseCFG::is_uncommon(const Block* block) {
   // Initial blocks must never be moved, so are never uncommon.
-  if (head()->is_Root() || head()->is_Start())  return false;
+  if (block->head()->is_Root() || block->head()->is_Start())  return false;
 
   // Check for way-low freq
-  if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
+  if( block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;
 
   // Look for code shape indicating uncommon_trap or slow path
-  if (has_uncommon_code()) return true;
+  if (block->has_uncommon_code()) return true;
 
   const float epsilon = 0.05f;
   const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
@@ -225,8 +225,8 @@
   uint freq_preds = 0;
   uint uncommon_for_freq_preds = 0;
 
-  for( uint i=1; i<num_preds(); i++ ) {
-    Block* guard = cfg->get_block_for_node(pred(i));
+  for( uint i=1; i<block->num_preds(); i++ ) {
+    Block* guard = get_block_for_node(block->pred(i));
     // Check to see if this block follows its guard 1 time out of 10000
     // or less.
     //
@@ -244,14 +244,14 @@
       uncommon_preds++;
     } else {
       freq_preds++;
-      if( _freq < guard->_freq * guard_factor ) {
+      if( block->_freq < guard->_freq * guard_factor ) {
         uncommon_for_freq_preds++;
       }
     }
   }
-  if( num_preds() > 1 &&
+  if( block->num_preds() > 1 &&
       // The block is uncommon if all preds are uncommon or
-      (uncommon_preds == (num_preds()-1) ||
+      (uncommon_preds == (block->num_preds()-1) ||
       // it is uncommon for all frequent preds.
        uncommon_for_freq_preds == freq_preds) ) {
     return true;
@@ -344,8 +344,8 @@
 
 void Block::dump(const PhaseCFG* cfg) const {
   dump_head(cfg);
-  for (uint i=0; i< _nodes.size(); i++) {
-    _nodes[i]->dump();
+  for (uint i=0; i< number_of_nodes(); i++) {
+    get_node(i)->dump();
   }
   tty->print("\n");
 }
@@ -434,7 +434,7 @@
       map_node_to_block(p, bb);
       map_node_to_block(x, bb);
       if( x != p ) {                // Only for root is x == p
-        bb->_nodes.push((Node*)x);
+        bb->push_node((Node*)x);
       }
       // Now handle predecessors
       ++sum;                        // Count 1 for self block
@@ -469,11 +469,11 @@
         assert( x != proj, "" );
         // Map basic block of projection
         map_node_to_block(proj, pb);
-        pb->_nodes.push(proj);
+        pb->push_node(proj);
       }
       // Insert self as a child of my predecessor block
       pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
-      assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
+      assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
               "too many control users, not a CFG?" );
     }
   }
@@ -495,7 +495,7 @@
   // surrounding blocks.
   float freq = in->_freq * in->succ_prob(succ_no);
   // get ProjNode corresponding to the succ_no'th successor of the in block
-  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
+  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
   // create region for basic block
   RegionNode* region = new (C) RegionNode(2);
   region->init_req(1, proj);
@@ -507,7 +507,7 @@
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, region);
   // add it to the basic block
-  block->_nodes.push(gto);
+  block->push_node(gto);
   map_node_to_block(gto, block);
   C->regalloc()->set_bad(gto->_idx);
   // hook up successor block
@@ -527,9 +527,9 @@
 // Does this block end in a multiway branch that cannot have the default case
 // flipped for another case?
 static bool no_flip_branch( Block *b ) {
-  int branch_idx = b->_nodes.size() - b->_num_succs-1;
+  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
   if( branch_idx < 1 ) return false;
-  Node *bra = b->_nodes[branch_idx];
+  Node *bra = b->get_node(branch_idx);
   if( bra->is_Catch() )
     return true;
   if( bra->is_Mach() ) {
@@ -550,16 +550,16 @@
 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
   // Find true target
   int end_idx = b->end_idx();
-  int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
+  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
   Block *succ = b->_succs[idx];
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, b->head());
-  Node *bp = b->_nodes[end_idx];
-  b->_nodes.map(end_idx,gto); // Slam over NeverBranch
+  Node *bp = b->get_node(end_idx);
+  b->map_node(gto, end_idx); // Slam over NeverBranch
   map_node_to_block(gto, b);
   C->regalloc()->set_bad(gto->_idx);
-  b->_nodes.pop();              // Yank projections
-  b->_nodes.pop();              // Yank projections
+  b->pop_node();              // Yank projections
+  b->pop_node();              // Yank projections
   b->_succs.map(0,succ);        // Map only successor
   b->_num_succs = 1;
   // remap successor's predecessors if necessary
@@ -575,8 +575,8 @@
   // Scan through block, yanking dead path from
   // all regions and phis.
   dead->head()->del_req(j);
-  for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
-    dead->_nodes[k]->del_req(j);
+  for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
+    dead->get_node(k)->del_req(j);
 }
 
 // Helper function to move block bx to the slot following b_index. Return
@@ -620,7 +620,7 @@
   if (e != Block::not_empty) {
     if (e == Block::empty_with_goto) {
       // Remove the goto, but leave the block.
-      b->_nodes.pop();
+      b->pop_node();
     }
     // Mark this block as a connector block, which will cause it to be
     // ignored in certain functions such as non_connector_successor().
@@ -663,13 +663,13 @@
     // to give a fake exit path to infinite loops.  At this late stage they
     // need to turn into Goto's so that when you enter the infinite loop you
     // indeed hang.
-    if (block->_nodes[block->end_idx()]->Opcode() == Op_NeverBranch) {
+    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
       convert_NeverBranch_to_Goto(block);
     }
 
     // Look for uncommon blocks and move to end.
     if (!C->do_freq_based_layout()) {
-      if (block->is_uncommon(this)) {
+      if (is_uncommon(block)) {
         move_to_end(block, i);
         last--;                   // No longer check for being uncommon!
         if (no_flip_branch(block)) { // Fall-thru case must follow?
@@ -720,9 +720,9 @@
     // exchange the true and false targets.
     if (no_flip_branch(block)) {
       // Find fall through case - it must fall into its target
-      int branch_idx = block->_nodes.size() - block->_num_succs;
+      int branch_idx = block->number_of_nodes() - block->_num_succs;
       for (uint j2 = 0; j2 < block->_num_succs; j2++) {
-        const ProjNode* p = block->_nodes[branch_idx + j2]->as_Proj();
+        const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
         if (p->_con == 0) {
           // successor j2 is fall through case
           if (block->non_connector_successor(j2) != bnext) {
@@ -743,14 +743,14 @@
 
       // Remove all CatchProjs
       for (uint j = 0; j < block->_num_succs; j++) {
-        block->_nodes.pop();
+        block->pop_node();
       }
 
     } else if (block->_num_succs == 1) {
       // Block ends in a Goto?
       if (bnext == bs0) {
         // We fall into next block; remove the Goto
-        block->_nodes.pop();
+        block->pop_node();
       }
 
     } else if(block->_num_succs == 2) { // Block ends in a If?
@@ -759,9 +759,9 @@
       //       be projections (in any order), the 3rd last node must be
       //       the IfNode (we have excluded other 2-way exits such as
       //       CatchNodes already).
-      MachNode* iff   = block->_nodes[block->_nodes.size() - 3]->as_Mach();
-      ProjNode* proj0 = block->_nodes[block->_nodes.size() - 2]->as_Proj();
-      ProjNode* proj1 = block->_nodes[block->_nodes.size() - 1]->as_Proj();
+      MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
+      ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
+      ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();
 
       // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
       assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
@@ -833,8 +833,8 @@
         iff->as_MachIf()->negate();
       }
 
-      block->_nodes.pop();          // Remove IfFalse & IfTrue projections
-      block->_nodes.pop();
+      block->pop_node();          // Remove IfFalse & IfTrue projections
+      block->pop_node();
 
     } else {
       // Multi-exit block, e.g. a switch statement
@@ -895,13 +895,13 @@
   // Verify sane CFG
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    uint cnt = block->_nodes.size();
+    uint cnt = block->number_of_nodes();
     uint j;
     for (j = 0; j < cnt; j++)  {
-      Node *n = block->_nodes[j];
+      Node *n = block->get_node(j);
       assert(get_block_for_node(n) == block, "");
       if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
-        assert(j == 1 || block->_nodes[j-1]->is_Phi(), "CreateEx must be first instruction in block");
+        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
       }
       for (uint k = 0; k < n->req(); k++) {
         Node *def = n->in(k);
@@ -930,14 +930,14 @@
     }
 
     j = block->end_idx();
-    Node* bp = (Node*)block->_nodes[block->_nodes.size() - 1]->is_block_proj();
+    Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
     assert(bp, "last instruction must be a block proj");
-    assert(bp == block->_nodes[j], "wrong number of successors for this block");
+    assert(bp == block->get_node(j), "wrong number of successors for this block");
     if (bp->is_Catch()) {
-      while (block->_nodes[--j]->is_MachProj()) {
+      while (block->get_node(--j)->is_MachProj()) {
         ;
       }
-      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
     } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
       assert(block->_num_succs == 2, "Conditional branch must have two targets");
     }
@@ -1440,9 +1440,9 @@
           Block *bnext = next(b);
           Block *bs0 = b->non_connector_successor(0);
 
-          MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
-          ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
-          ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
+          MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
+          ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
+          ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
 
           if (bnext == bs0) {
             // Fall-thru case in succs[0], should be in succs[1]
@@ -1454,8 +1454,8 @@
             b->_succs.map( 1, tbs0 );
 
             // Flip projections to match targets
-            b->_nodes.map(b->_nodes.size()-2, proj1);
-            b->_nodes.map(b->_nodes.size()-1, proj0);
+            b->map_node(proj1, b->number_of_nodes() - 2);
+            b->map_node(proj0, b->number_of_nodes() - 1);
           }
         }
       }
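
The is_uncommon() query above moves from Block to PhaseCFG: it needs get_block_for_node(), i.e. whole-CFG state, so hosting it on the CFG removes the PhaseCFG* parameter that every block-side caller had to thread through. The shape of the move in miniature, with illustrative names and a stand-in heuristic:

    #include <cstdio>

    struct Block { float freq; };

    struct PhaseCFG {
      // was: bool Block::is_uncommon(PhaseCFG* cfg) const
      bool is_uncommon(const Block* block) const {
        return block->freq < 0.00001f;  // stand-in for the real tests
      }
    };

    int main() {
      PhaseCFG cfg;
      Block b = { 0.0f };
      std::printf("%d\n", cfg.is_uncommon(&b) ? 1 : 0);  // prints 1
      return 0;
    }
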
--- a/hotspot/src/share/vm/opto/block.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/block.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -105,15 +105,53 @@
 // any optimization pass.  They are created late in the game.
 class Block : public CFGElement {
   friend class VMStructs;
- public:
+
+private:
   // Nodes in this block, in order
   Node_List _nodes;
 
+public:
+
+  // Get the node at index 'at_index'; if 'at_index' is out of bounds, return NULL
+  Node* get_node(uint at_index) const {
+    return _nodes[at_index];
+  }
+
+  // Get the number of nodes in this block
+  uint number_of_nodes() const {
+    return _nodes.size();
+  }
+
+  // Map a node 'node' to index 'to_index' in the block; if the index is out of bounds, the size of the node list is increased
+  void map_node(Node* node, uint to_index) {
+    _nodes.map(to_index, node);
+  }
+
+  // Insert a node 'node' at index 'at_index', moving all nodes at higher indices up one step; if 'at_index' is out of bounds, we crash
+  void insert_node(Node* node, uint at_index) {
+    _nodes.insert(at_index, node);
+  }
+
+  // Remove a node at index 'at_index'
+  void remove_node(uint at_index) {
+    _nodes.remove(at_index);
+  }
+
+  // Push a node 'node' onto the node list
+  void push_node(Node* node) {
+    _nodes.push(node);
+  }
+
+  // Pop the last node off the node list
+  Node* pop_node() {
+    return _nodes.pop();
+  }
+
   // Basic blocks have a Node which defines Control for all Nodes pinned in
   // this block.  This Node is a RegionNode.  Exception-causing Nodes
   // (division, subroutines) and Phi functions are always pinned.  Later,
   // every Node will get pinned to some block.
-  Node *head() const { return _nodes[0]; }
+  Node *head() const { return get_node(0); }
 
   // CAUTION: num_preds() is ONE based, so that predecessor numbers match
   // input edges to Regions and Phis.
@@ -274,29 +312,12 @@
 
   // Add an instruction to an existing block.  It must go after the head
   // instruction and before the end instruction.
-  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
+  void add_inst( Node *n ) { insert_node(n, end_idx()); }
   // Find node in block
   uint find_node( const Node *n ) const;
   // Find and remove n from block list
   void find_remove( const Node *n );
 
-  // helper function that adds caller save registers to MachProjNode
-  void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
-  // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
-
-  // Perform basic-block local scheduling
-  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
-  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
-  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
-  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
-  // Cleanup if any code lands between a Call and his Catch
-  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
-  // Detect implicit-null-check opportunities.  Basically, find NULL checks
-  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
-  // I can generate a memory op if there is not one nearby.
-  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
-
   // Return the empty status of a block
   enum { not_empty, empty_with_goto, completely_empty };
   int is_Empty() const;
@@ -328,10 +349,6 @@
   // Examine block's code shape to predict if it is not commonly executed.
   bool has_uncommon_code() const;
 
-  // Use frequency calculations and code shape to predict if the block
-  // is uncommon.
-  bool is_uncommon(PhaseCFG* cfg) const;
-
 #ifndef PRODUCT
   // Debugging print of basic block
   void dump_bidx(const Block* orig, outputStream* st = tty) const;
@@ -414,6 +431,27 @@
   // to late. Helper for schedule_late.
   Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
 
+  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
+  void set_next_call(Block* block, Node* n, VectorSet& next_call);
+  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);
+
+  // Perform basic-block local scheduling
+  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);
+
+  // Schedule a call next in the block
+  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);
+
+  // Clean up if any code lands between a Call and its Catch
+  void call_catch_cleanup(Block* block);
+
+  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
+  void  catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);
+
+  // Detect implicit-null-check opportunities.  Basically, find NULL checks
+  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
+  // I can generate a memory op if there is not one nearby.
+  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
+
   // Perform a Depth First Search (DFS).
   // Setup 'vertex' as DFS to vertex mapping.
   // Setup 'semi' as vertex to DFS mapping.
@@ -530,6 +568,10 @@
     return (_node_to_block_mapping.lookup(node->_idx) != NULL);
   }
 
+  // Use frequency calculations and code shape to predict if the block
+  // is uncommon.
+  bool is_uncommon(const Block* block);
+
 #ifdef ASSERT
   Unique_Node_List _raw_oops;
 #endif
@@ -550,7 +592,7 @@
 
   // Insert a node into a block at index and map the node to the block
   void insert(Block *b, uint idx, Node *n) {
-    b->_nodes.insert( idx, n );
+    b->insert_node(n, idx);
     map_node_to_block(n, b);
   }
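
All of the _nodes.size()/_nodes[i]/_nodes.insert() call sites rewritten throughout this changeset now go through the accessor API that block.hpp introduces above, so the node-list representation is no longer exposed. A minimal sketch of the same encapsulation over a plain vector; this is illustrative only and does not reproduce Node_List's real semantics (for example, map_node() here does not grow the list):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Node { int id; };

    class Block {
     private:
      std::vector<Node*> _nodes;                    // nodes in order
     public:
      Node* get_node(std::size_t at) const   { return _nodes[at]; }
      std::size_t number_of_nodes() const    { return _nodes.size(); }
      void  push_node(Node* n)               { _nodes.push_back(n); }
      Node* pop_node() {
        Node* n = _nodes.back();
        _nodes.pop_back();
        return n;
      }
      void  insert_node(Node* n, std::size_t at) {
        _nodes.insert(_nodes.begin() + at, n);      // shifts later nodes up
      }
      void  remove_node(std::size_t at)      { _nodes.erase(_nodes.begin() + at); }
      void  map_node(Node* n, std::size_t at) { _nodes[at] = n; }
    };

    int main() {
      Block b;
      Node n1 = { 1 }, n2 = { 2 };
      b.push_node(&n1);
      b.insert_node(&n2, 0);                        // n2 now precedes n1
      assert(b.get_node(0) == &n2 && b.number_of_nodes() == 2);
      b.remove_node(0);
      return b.pop_node() == &n1 ? 0 : 1;           // returns 0
    }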
 
--- a/hotspot/src/share/vm/opto/buildOopMap.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/buildOopMap.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -121,8 +121,8 @@
 // Given reaching-defs for this block start, compute it for this block end
 void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {
 
-  for( uint i=0; i<_b->_nodes.size(); i++ ) {
-    Node *n = _b->_nodes[i];
+  for( uint i=0; i<_b->number_of_nodes(); i++ ) {
+    Node *n = _b->get_node(i);
 
     if( n->jvms() ) {           // Build an OopMap here?
       JVMState *jvms = n->jvms();
@@ -447,8 +447,8 @@
       }
 
       // Now walk tmp_live up the block backwards, computing live
-      for( int k=b->_nodes.size()-1; k>=0; k-- ) {
-        Node *n = b->_nodes[k];
+      for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
+        Node *n = b->get_node(k);
         // KILL def'd bits
         int first = regalloc->get_reg_first(n);
         int second = regalloc->get_reg_second(n);
@@ -544,12 +544,12 @@
     for (i = 1; i < cfg->number_of_blocks(); i++) {
       Block* block = cfg->get_block(i);
       uint j;
-      for (j = 1; j < block->_nodes.size(); j++) {
-        if (block->_nodes[j]->jvms() && (*safehash)[block->_nodes[j]] == NULL) {
+      for (j = 1; j < block->number_of_nodes(); j++) {
+        if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
            break;
         }
       }
-      if (j < block->_nodes.size()) {
+      if (j < block->number_of_nodes()) {
         break;
       }
     }
--- a/hotspot/src/share/vm/opto/callGenerator.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/callGenerator.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -260,7 +260,7 @@
   // Because WarmInfo objects live over the entire lifetime of the
   // Compile object, they are allocated into the comp_arena, which
   // does not get resource marked or reset during the compile process
-  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
+  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
   void operator delete( void * ) { } // fast deallocation
 
   static WarmCallInfo* always_hot();
--- a/hotspot/src/share/vm/opto/callnode.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -458,7 +458,7 @@
       st->print("={");
       uint nf = spobj->n_fields();
       if (nf > 0) {
-        uint first_ind = spobj->first_index();
+        uint first_ind = spobj->first_index(mcall->jvms());
         Node* fld_node = mcall->in(first_ind);
         ciField* cifield;
         if (iklass != NULL) {
@@ -1063,7 +1063,6 @@
   int scloff = jvms->scloff();
   int endoff = jvms->endoff();
   assert(endoff == (int)req(), "no other states or debug info after me");
-  assert(jvms->scl_size() == 0, "parsed code should not have scalar objects");
   Node* top = Compile::current()->top();
   for (uint i = 0; i < grow_by; i++) {
     ins_req(monoff, top);
@@ -1079,32 +1078,31 @@
   const int MonitorEdges = 2;
   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   assert(req() == jvms()->endoff(), "correct sizing");
-  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   int nextmon = jvms()->scloff();
   if (GenerateSynchronizationCode) {
-    add_req(lock->box_node());
-    add_req(lock->obj_node());
+    ins_req(nextmon,   lock->box_node());
+    ins_req(nextmon+1, lock->obj_node());
   } else {
     Node* top = Compile::current()->top();
-    add_req(top);
-    add_req(top);
+    ins_req(nextmon, top);
+    ins_req(nextmon, top);
   }
-  jvms()->set_scloff(nextmon+MonitorEdges);
+  jvms()->set_scloff(nextmon + MonitorEdges);
   jvms()->set_endoff(req());
 }
 
 void SafePointNode::pop_monitor() {
   // Delete last monitor from debug info
-  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   debug_only(int num_before_pop = jvms()->nof_monitors());
-  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
+  const int MonitorEdges = 2;
+  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   int scloff = jvms()->scloff();
   int endoff = jvms()->endoff();
   int new_scloff = scloff - MonitorEdges;
   int new_endoff = endoff - MonitorEdges;
   jvms()->set_scloff(new_scloff);
   jvms()->set_endoff(new_endoff);
-  while (scloff > new_scloff)  del_req(--scloff);
+  while (scloff > new_scloff)  del_req_ordered(--scloff);
   assert(jvms()->nof_monitors() == num_before_pop-1, "");
 }
 
@@ -1169,13 +1167,12 @@
 }
 
 SafePointScalarObjectNode*
-SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
+SafePointScalarObjectNode::clone(Dict* sosn_map) const {
   void* cached = (*sosn_map)[(void*)this];
   if (cached != NULL) {
     return (SafePointScalarObjectNode*)cached;
   }
   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
-  res->_first_index += jvms_adj;
   sosn_map->Insert((void*)this, (void*)res);
   return res;
 }
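
Together with the removed "parsed code should not have scalar objects" asserts, the push_monitor()/pop_monitor() rewrite above lets scalar-object edges safely follow the monitor section: monitor edges are now ins_req()'d at scloff instead of add_req()'d at the end, and removed with del_req_ordered(), so any SafePointScalarObjectNode field edges after the monitor section slide as a unit and keep their relative order. A small sketch of the index bookkeeping, using string labels in place of real node edges:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Debug-info inputs in miniature: locals | monitors | scalar fields
      const char* init[] = { "L0", "L1", "M0box", "M0obj", "S0", "S1" };
      std::vector<const char*> req(init, init + 6);
      int scloff = 4;                            // scalar section starts here

      // push_monitor(): ins_req() box and obj at scloff, then bump scloff.
      req.insert(req.begin() + scloff, "M1obj");
      req.insert(req.begin() + scloff, "M1box"); // ends up before M1obj
      scloff += 2;                               // MonitorEdges

      for (std::size_t i = 0; i < req.size(); i++) std::printf("%s ", req[i]);
      std::printf("\nscloff=%d\n", scloff);      // S0 S1 still follow scloff
      return 0;
    }
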
--- a/hotspot/src/share/vm/opto/callnode.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -216,7 +216,7 @@
   // Because JVMState objects live over the entire lifetime of the
   // Compile object, they are allocated into the comp_arena, which
   // does not get resource marked or reset during the compile process
-  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
+  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
   void operator delete( void * ) { } // fast deallocation
 
   // Create a new JVMState, ready for abstract interpretation.
@@ -449,14 +449,17 @@
 // at a safepoint.
 
 class SafePointScalarObjectNode: public TypeNode {
-  uint _first_index; // First input edge index of a SafePoint node where
+  uint _first_index; // Relative index of the first input edge of a SafePoint node where
                      // states of the scalarized object fields are collected.
+                     // It is relative to the last (youngest) jvms->_scloff.
   uint _n_fields;    // Number of non-static fields of the scalarized object.
   DEBUG_ONLY(AllocateNode* _alloc;)
 
   virtual uint hash() const ; // { return NO_HASH; }
   virtual uint cmp( const Node &n ) const;
 
+  uint first_index() const { return _first_index; }
+
 public:
   SafePointScalarObjectNode(const TypeOopPtr* tp,
 #ifdef ASSERT
@@ -469,7 +472,10 @@
   virtual const RegMask &out_RegMask() const;
   virtual uint           match_edge(uint idx) const;
 
-  uint first_index() const { return _first_index; }
+  uint first_index(JVMState* jvms) const {
+    assert(jvms != NULL, "missed JVMS");
+    return jvms->scloff() + _first_index;
+  }
   uint n_fields()    const { return _n_fields; }
 
 #ifdef ASSERT
@@ -485,7 +491,7 @@
   // corresponds appropriately to "this" in "new_call".  Assumes that
   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
-  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
+  SafePointScalarObjectNode* clone(Dict* sosn_map) const;
 
 #ifndef PRODUCT
   virtual void              dump_spec(outputStream *st) const;
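
Storing _first_index relative to the youngest jvms->scloff() is what lets clone() above drop its jvms_adj parameter: when the monitor section grows or shrinks, scloff() and the scalar-object field edges move together, so the recorded offset stays valid without fixups. A worked example of the arithmetic (a sketch, not HotSpot code):

    #include <cstdio>

    struct JVMState { int scloff; };

    // Absolute input index of a scalarized object's first field edge.
    int first_index(const JVMState* jvms, int relative) {
      return jvms->scloff + relative;
    }

    int main() {
      JVMState jvms = { 10 };
      std::printf("%d\n", first_index(&jvms, 3));  // 13
      jvms.scloff += 2;  // e.g. push_monitor() grew the monitor section
      std::printf("%d\n", first_index(&jvms, 3));  // 15: still past monitors
      return 0;
    }
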
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -301,7 +301,7 @@
       // Copy kill projections after the cloned node
       Node* kills = proj->clone();
       kills->set_req(0, copy);
-      b->_nodes.insert(idx++, kills);
+      b->insert_node(kills, idx++);
       _cfg.map_node_to_block(kills, b);
       new_lrg(kills, max_lrg_id++);
     }
@@ -682,11 +682,11 @@
   uint lr_counter = 1;
   for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
     Block* block = _cfg.get_block(i);
-    uint cnt = block->_nodes.size();
+    uint cnt = block->number_of_nodes();
 
     // Handle all the normal Nodes in the block
     for( uint j = 0; j < cnt; j++ ) {
-      Node *n = block->_nodes[j];
+      Node *n = block->get_node(j);
       // Pre-color to the zero live range, or pick virtual register
       const RegMask &rm = n->out_RegMask();
       _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
@@ -710,8 +710,8 @@
     Block* block = _cfg.get_block(i);
 
     // For all instructions
-    for (uint j = 1; j < block->_nodes.size(); j++) {
-      Node* n = block->_nodes[j];
+    for (uint j = 1; j < block->number_of_nodes(); j++) {
+      Node* n = block->get_node(j);
       uint input_edge_start = 1; // Skip control (input 0) on most nodes
       if (n->is_Mach()) {
         input_edge_start = n->as_Mach()->oper_input_base();
@@ -1604,7 +1604,7 @@
     // For all instructions in block
     uint last_inst = block->end_idx();
     for (uint j = 1; j <= last_inst; j++) {
-      Node* n = block->_nodes[j];
+      Node* n = block->get_node(j);
 
       // Dead instruction???
       assert( n->outcnt() != 0 ||// Nothing dead after post alloc
@@ -1641,7 +1641,7 @@
             assert( cisc->oper_input_base() == 2, "Only adding one edge");
             cisc->ins_req(1,src);         // Requires a memory edge
           }
-          block->_nodes.map(j,cisc);          // Insert into basic block
+          block->map_node(cisc, j);          // Insert into basic block
           n->subsume_by(cisc, C); // Correct graph
           //
           ++_used_cisc_instructions;
@@ -1698,7 +1698,7 @@
       // (where top() node is placed).
       base->init_req(0, _cfg.get_root_node());
       Block *startb = _cfg.get_block_for_node(C->top());
-      startb->_nodes.insert(startb->find_node(C->top()), base );
+      startb->insert_node(base, startb->find_node(C->top()));
       _cfg.map_node_to_block(base, startb);
       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
     }
@@ -1743,9 +1743,9 @@
   // Search the current block for an existing base-Phi
   Block *b = _cfg.get_block_for_node(derived);
   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
-    Node *phi = b->_nodes[i];
+    Node *phi = b->get_node(i);
     if( !phi->is_Phi() ) {      // Found end of Phis with no match?
-      b->_nodes.insert( i, base ); // Must insert created Phi here as base
+      b->insert_node(base, i); // Must insert created Phi here as base
       _cfg.map_node_to_block(base, b);
       new_lrg(base,maxlrg++);
       break;
@@ -1786,7 +1786,7 @@
     IndexSet liveout(_live->live(block));
 
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
       // like to see in the same register.  Compare uses the loop-phi and so
@@ -1979,8 +1979,8 @@
   b->dump_head(&_cfg);
 
   // For all instructions
-  for( uint j = 0; j < b->_nodes.size(); j++ )
-    dump(b->_nodes[j]);
+  for( uint j = 0; j < b->number_of_nodes(); j++ )
+    dump(b->get_node(j));
   // Print live-out info at end of block
   if( _live ) {
     tty->print("Liveout: ");
@@ -2271,8 +2271,8 @@
     int dump_once = 0;
 
     // For all instructions
-    for( uint j = 0; j < block->_nodes.size(); j++ ) {
-      Node *n = block->_nodes[j];
+    for( uint j = 0; j < block->number_of_nodes(); j++ ) {
+      Node *n = block->get_node(j);
       if (_lrg_map.find_const(n) == lidx) {
         if (!dump_once++) {
           tty->cr();
--- a/hotspot/src/share/vm/opto/coalesce.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/coalesce.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -54,9 +54,9 @@
     for( j=0; j<b->_num_succs; j++ )
       tty->print("B%d ",b->_succs[j]->_pre_order);
     tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
-    uint cnt = b->_nodes.size();
+    uint cnt = b->number_of_nodes();
     for( j=0; j<cnt; j++ ) {
-      Node *n = b->_nodes[j];
+      Node *n = b->get_node(j);
       dump( n );
       tty->print("\t%s\t",n->Name());
 
@@ -152,7 +152,7 @@
   // after the last use.  Last use is really first-use on a backwards scan.
   uint i = b->end_idx()-1;
   while(1) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if (n->_idx < _unique) {
@@ -174,7 +174,7 @@
   // the last kill.  Thus it is the first kill on a backwards scan.
   i = b->end_idx()-1;
   while (1) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if (n->_idx < _unique) {
@@ -200,13 +200,13 @@
     tmp ->set_req(idx,copy->in(idx));
     copy->set_req(idx,tmp);
     // Save source in temp early, before source is killed
-    b->_nodes.insert(kill_src_idx,tmp);
+    b->insert_node(tmp, kill_src_idx);
     _phc._cfg.map_node_to_block(tmp, b);
     last_use_idx++;
   }
 
   // Insert just after last use
-  b->_nodes.insert(last_use_idx+1,copy);
+  b->insert_node(copy, last_use_idx + 1);
 }
 
 void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
@@ -237,8 +237,8 @@
     Block *b = _phc._cfg.get_block(i);
     uint cnt = b->num_preds();  // Number of inputs to the Phi
 
-    for( uint l = 1; l<b->_nodes.size(); l++ ) {
-      Node *n = b->_nodes[l];
+    for( uint l = 1; l<b->number_of_nodes(); l++ ) {
+      Node *n = b->get_node(l);
 
       // Do not use removed copies; use the copied value instead
       uint ncnt = n->req();
@@ -260,7 +260,7 @@
         if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
           n->replace_by(def);
           n->set_req(cidx,NULL);
-          b->_nodes.remove(l);
+          b->remove_node(l);
           l--;
           continue;
         }
@@ -321,13 +321,13 @@
                m->as_Mach()->rematerialize()) {
               copy = m->clone();
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert(l++, copy);
+              b->insert_node(copy, l++);
               l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
             } else {
               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
               copy = new (C) MachSpillCopyNode(m, *rm, *rm);
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert(l++, copy);
+              b->insert_node(copy, l++);
             }
             // Insert the copy in the use-def chain
             n->set_req(idx, copy);
@@ -339,7 +339,7 @@
         } // End of is two-adr
 
         // Insert a copy at a debug use for a lrg which has high frequency
-        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
+        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
           // Walk the debug inputs to the node and check for lrg freq
           JVMState* jvms = n->jvms();
           uint debug_start = jvms ? jvms->debug_start() : 999999;
@@ -376,7 +376,7 @@
               // Insert the copy in the use-def chain
               n->set_req(inpidx, copy );
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert( l++, copy );
+              b->insert_node(copy, l++);
               // Extend ("register allocate") the names array for the copy.
               uint max_lrg_id = _phc._lrg_map.max_lrg_id();
               _phc.new_lrg(copy, max_lrg_id);
@@ -431,8 +431,8 @@
     }
 
     // Visit all the Phis in successor block
-    for( uint k = 1; k<bs->_nodes.size(); k++ ) {
-      Node *n = bs->_nodes[k];
+    for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
+      Node *n = bs->get_node(k);
       if( !n->is_Phi() ) break;
       combine_these_two( n, n->in(j) );
     }
@@ -442,7 +442,7 @@
   // Check _this_ block for 2-address instructions and copies.
   uint cnt = b->end_idx();
   for( i = 1; i<cnt; i++ ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     uint idx;
     // 2-address instructions have a virtual Copy matching their input
     // to their output
@@ -490,10 +490,10 @@
   dst_copy->set_req( didx, src_def );
   // Add copy to free list
   // _phc.free_spillcopy(b->_nodes[bindex]);
-  assert( b->_nodes[bindex] == dst_copy, "" );
+  assert( b->get_node(bindex) == dst_copy, "" );
   dst_copy->replace_by( dst_copy->in(didx) );
   dst_copy->set_req( didx, NULL);
-  b->_nodes.remove(bindex);
+  b->remove_node(bindex);
   if( bindex < b->_ihrp_index ) b->_ihrp_index--;
   if( bindex < b->_fhrp_index ) b->_fhrp_index--;
 
@@ -523,8 +523,8 @@
       bindex2 = b2->end_idx()-1;
     }
     // Get prior instruction
-    assert(bindex2 < b2->_nodes.size(), "index out of bounds");
-    Node *x = b2->_nodes[bindex2];
+    assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
+    Node *x = b2->get_node(bindex2);
     if( x == prev_copy ) {      // Previous copy in copy chain?
       if( prev_copy == src_copy)// Found end of chain and all interferences
         break;                  // So break out of loop
@@ -769,14 +769,14 @@
 // Conservative (but pessimistic) copy coalescing of a single block
 void PhaseConservativeCoalesce::coalesce( Block *b ) {
   // Bail out on infrequent blocks
-  if (b->is_uncommon(&_phc._cfg)) {
+  if (_phc._cfg.is_uncommon(b)) {
     return;
   }
   // Check this block for copies.
   for( uint i = 1; i<b->end_idx(); i++ ) {
     // Check for actual copies on inputs.  Coalesce a copy into its
     // input if use and copy's input are compatible.
-    Node *copy1 = b->_nodes[i];
+    Node *copy1 = b->get_node(i);
     uint idx1 = copy1->is_Copy();
     if( !idx1 ) continue;       // Not a copy
 
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -2258,7 +2258,7 @@
     if (block->is_connector() && !Verbose) {
       continue;
     }
-    n = block->_nodes[0];
+    n = block->head();
     if (pcs && n->_idx < pc_limit) {
       tty->print("%3.3x   ", pcs[n->_idx]);
     } else {
@@ -2273,12 +2273,12 @@
 
     // For all instructions
     Node *delay = NULL;
-    for (uint j = 0; j < block->_nodes.size(); j++) {
+    for (uint j = 0; j < block->number_of_nodes(); j++) {
       if (VMThread::should_terminate()) {
         cut_short = true;
         break;
       }
-      n = block->_nodes[j];
+      n = block->get_node(j);
       if (valid_bundle_info(n)) {
         Bundle* bundle = node_bundling(n);
         if (bundle->used_in_unconditional_delay()) {
--- a/hotspot/src/share/vm/opto/domgraph.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/domgraph.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -211,21 +211,21 @@
 uint Block_Stack::most_frequent_successor( Block *b ) {
   uint freq_idx = 0;
   int eidx = b->end_idx();
-  Node *n = b->_nodes[eidx];
+  Node *n = b->get_node(eidx);
   int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
   switch( op ) {
   case Op_CountedLoopEnd:
   case Op_If: {               // Split frequency amongst children
     float prob = n->as_MachIf()->_prob;
     // Is succ[0] the TRUE branch or the FALSE branch?
-    if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse )
+    if( b->get_node(eidx+1)->Opcode() == Op_IfFalse )
       prob = 1.0f - prob;
     freq_idx = prob < PROB_FAIR;      // freq=1 for succ[0] < 0.5 prob
     break;
   }
   case Op_Catch:                // Split frequency amongst children
     for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ )
-      if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index )
+      if( b->get_node(eidx+1+freq_idx)->as_CatchProj()->_con == CatchProjNode::fall_through_index )
         break;
     // Handle case of no fall-thru (e.g., check-cast MUST throw an exception)
     if( freq_idx == b->_num_succs ) freq_idx = 0;
--- a/hotspot/src/share/vm/opto/gcm.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -102,12 +102,12 @@
     uint j = 0;
     if (pb->_num_succs != 1) {  // More than 1 successor?
       // Search for successor
-      uint max = pb->_nodes.size();
+      uint max = pb->number_of_nodes();
       assert( max > 1, "" );
       uint start = max - pb->_num_succs;
       // Find which output path belongs to projection
       for (j = start; j < max; j++) {
-        if( pb->_nodes[j] == in0 )
+        if( pb->get_node(j) == in0 )
           break;
       }
       assert( j < max, "must find" );
@@ -1027,8 +1027,8 @@
   Block* least       = LCA;
   double least_freq  = least->_freq;
   uint target        = get_latency_for_node(self);
-  uint start_latency = get_latency_for_node(LCA->_nodes[0]);
-  uint end_latency   = get_latency_for_node(LCA->_nodes[LCA->end_idx()]);
+  uint start_latency = get_latency_for_node(LCA->head());
+  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
   bool in_latency    = (target <= start_latency);
   const Block* root_block = get_block_for_node(_root);
 
@@ -1049,9 +1049,9 @@
     self->dump();
     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
       LCA->_pre_order,
-      LCA->_nodes[0]->_idx,
+      LCA->head()->_idx,
       start_latency,
-      LCA->_nodes[LCA->end_idx()]->_idx,
+      LCA->get_node(LCA->end_idx())->_idx,
       end_latency,
       least_freq);
   }
@@ -1074,14 +1074,14 @@
     if (mach && LCA == root_block)
       break;
 
-    uint start_lat = get_latency_for_node(LCA->_nodes[0]);
+    uint start_lat = get_latency_for_node(LCA->head());
     uint end_idx   = LCA->end_idx();
-    uint end_lat   = get_latency_for_node(LCA->_nodes[end_idx]);
+    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
     double LCA_freq = LCA->_freq;
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
-        LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
+        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
     }
 #endif
     cand_cnt++;
@@ -1342,7 +1342,7 @@
       Node* proj = _matcher._null_check_tests[i];
       Node* val  = _matcher._null_check_tests[i + 1];
       Block* block = get_block_for_node(proj);
-      block->implicit_null_check(this, proj, val, allowed_reasons);
+      implicit_null_check(block, proj, val, allowed_reasons);
       // The implicit_null_check will only perform the transformation
       // if the null branch is truly uncommon, *and* it leads to an
       // uncommon trap.  Combined with the too_many_traps guards
@@ -1363,7 +1363,7 @@
   visited.Clear();
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    if (!block->schedule_local(this, _matcher, ready_cnt, visited)) {
+    if (!schedule_local(block, ready_cnt, visited)) {
       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
         C->record_method_not_compilable("local schedule failed");
       }
@@ -1375,7 +1375,7 @@
   // clone the instructions on all paths below the Catch.
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    block->call_catch_cleanup(this, C);
+    call_catch_cleanup(block);
   }
 
 #ifndef PRODUCT
@@ -1726,7 +1726,7 @@
 // Determine the probability of reaching successor 'i' from the receiver block.
 float Block::succ_prob(uint i) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
@@ -1761,7 +1761,7 @@
     float prob  = n->as_MachIf()->_prob;
     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
     // If succ[i] is the FALSE branch, invert path info
-    if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
+    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
       return 1.0f - prob; // not taken
     } else {
       return prob; // taken
@@ -1773,7 +1773,7 @@
     return 1.0f/_num_succs;
 
   case Op_Catch: {
-    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
     if (ci->_con == CatchProjNode::fall_through_index) {
       // Fall-thru path gets the lion's share.
       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
@@ -1810,7 +1810,7 @@
 // Return the number of fall-through candidates for a block
 int Block::num_fall_throughs() {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
@@ -1834,7 +1834,7 @@
 
   case Op_Catch: {
     for (uint i = 0; i < _num_succs; i++) {
-      const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
       if (ci->_con == CatchProjNode::fall_through_index) {
         return 1;
       }
@@ -1862,14 +1862,14 @@
 // Return true if a specific successor could be fall-through target.
 bool Block::succ_fall_through(uint i) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
     if (n->is_MachNullCheck()) {
       // In theory, either side can fall-thru; for simplicity's sake,
       // let's say only the false branch can now.
-      return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
+      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
     }
     op = n->as_Mach()->ideal_Opcode();
   }
@@ -1883,7 +1883,7 @@
     return true;
 
   case Op_Catch: {
-    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
     return ci->_con == CatchProjNode::fall_through_index;
   }
 
@@ -1907,7 +1907,7 @@
 // Update the probability of a two-branch to be uncommon
 void Block::update_uncommon_branch(Block* ub) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->as_Mach()->ideal_Opcode();
 
@@ -1923,7 +1923,7 @@
 
   // If ub is the true path, make the probability small, else
   // ub is the false path, and make the probability large
-  bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
+  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
 
   // Get existing probability
   float p = n->as_MachIf()->_prob;
--- a/hotspot/src/share/vm/opto/generateOptoStub.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/generateOptoStub.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -61,6 +61,7 @@
   JVMState* jvms = new (C) JVMState(0);
   jvms->set_bci(InvocationEntryBci);
   jvms->set_monoff(max_map);
+  jvms->set_scloff(max_map);
   jvms->set_endoff(max_map);
   {
     SafePointNode *map = new (C) SafePointNode( max_map, jvms );
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1501,6 +1501,25 @@
   }
 }
 
+bool GraphKit::can_move_pre_barrier() const {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  switch (bs->kind()) {
+    case BarrierSet::G1SATBCT:
+    case BarrierSet::G1SATBCTLogging:
+      return true; // Can move it if no safepoint
+
+    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableExtension:
+    case BarrierSet::ModRef:
+      return true; // There is no pre-barrier
+
+    case BarrierSet::Other:
+    default:
+      ShouldNotReachHere();
+  }
+  return false;
+}
+
 void GraphKit::post_barrier(Node* ctl,
                             Node* store,
                             Node* obj,
@@ -3551,6 +3570,8 @@
   } else {
     // In this case both val_type and alias_idx are unused.
     assert(pre_val != NULL, "must be loaded already");
+    // Nothing to be done if pre_val is null.
+    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
     assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
   }
   assert(bt == T_OBJECT, "or we shouldn't be here");
@@ -3595,7 +3616,7 @@
     if (do_load) {
       // load original value
       // alias_idx correct??
-      pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
+      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
     }
 
     // if (pre_val != NULL)
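
can_move_pre_barrier() above captures when the pre-barrier may be emitted after the operation it guards: the card-table schemes have no pre-barrier at all, and the G1 SATB pre-barrier only needs the field's old value enqueued before the next safepoint, which is exactly the value a load_store operation produces anyway. A sketch of the reordering with std::atomic; this is illustrative only, not the real barrier machinery:

    #include <atomic>
    #include <vector>

    static std::vector<int> satb_queue;  // stands in for the SATB enqueue

    int swap_with_pre_barrier(std::atomic<int>& field, int new_val) {
      int old_val = field.exchange(new_val);  // the op yields the pre-image
      satb_queue.push_back(old_val);          // barrier work done afterwards
      return old_val;
    }

    int main() {
      std::atomic<int> f(1);
      swap_with_pre_barrier(f, 2);
      return satb_queue[0] == 1 ? 0 : 1;       // returns 0
    }
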
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -695,6 +695,10 @@
   void write_barrier_post(Node *store, Node* obj,
                           Node* adr,  uint adr_idx, Node* val, bool use_precise);
 
+  // Allow reordering of pre-barrier with oop store and/or post-barrier.
+  // Used for load_store operations which load the old value.
+  bool can_move_pre_barrier() const;
+
   // G1 pre/post barriers
   void g1_write_barrier_pre(bool do_load,
                             Node* obj,
--- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -639,8 +639,8 @@
     // reachable but are in the CFG so add them here.
     for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
       Block* block = C->cfg()->get_block(i);
-      for (uint s = 0; s < block->_nodes.size(); s++) {
-        nodeStack.push(block->_nodes[s]);
+      for (uint s = 0; s < block->number_of_nodes(); s++) {
+        nodeStack.push(block->get_node(s));
       }
     }
   }
@@ -713,9 +713,9 @@
       tail(SUCCESSORS_ELEMENT);
 
       head(NODES_ELEMENT);
-      for (uint s = 0; s < block->_nodes.size(); s++) {
+      for (uint s = 0; s < block->number_of_nodes(); s++) {
         begin_elem(NODE_ELEMENT);
-        print_attr(NODE_ID_PROPERTY, get_node_id(block->_nodes[s]));
+        print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
         end_elem();
       }
       tail(NODES_ELEMENT);
--- a/hotspot/src/share/vm/opto/ifg.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/ifg.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -319,7 +319,7 @@
     // value is then removed from the live-ness set and its inputs are
     // added to the live-ness set.
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Get value being defined
       uint r = _lrg_map.live_range_id(n);
@@ -456,7 +456,7 @@
     // Compute first nonphi node index
     uint first_inst;
     for (first_inst = 1; first_inst < last_inst; first_inst++) {
-      if (!block->_nodes[first_inst]->is_Phi()) {
+      if (!block->get_node(first_inst)->is_Phi()) {
         break;
       }
     }
@@ -464,15 +464,15 @@
     // Spills could be inserted before a CreateEx node, which should be
     // the first instruction in the block after the Phis. Move CreateEx up.
     for (uint insidx = first_inst; insidx < last_inst; insidx++) {
-      Node *ex = block->_nodes[insidx];
+      Node *ex = block->get_node(insidx);
       if (ex->is_SpillCopy()) {
         continue;
       }
       if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
         // If the CreateEx isn't above all the MachSpillCopies
         // then move it to the top.
-        block->_nodes.remove(insidx);
-        block->_nodes.insert(first_inst, ex);
+        block->remove_node(insidx);
+        block->insert_node(ex, first_inst);
       }
       // Stop once a CreateEx or any other node is found
       break;
@@ -523,7 +523,7 @@
     // to the live-ness set.
     uint j;
     for (j = last_inst + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Get value being defined
       uint r = _lrg_map.live_range_id(n);
@@ -541,7 +541,7 @@
           if( !n->is_Proj() ||
               // Could also be a flags-projection of a dead ADD or such.
               (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
-            block->_nodes.remove(j - 1);
+            block->remove_node(j - 1);
             if (lrgs(r)._def == n) {
               lrgs(r)._def = 0;
             }
@@ -605,7 +605,7 @@
             // (j - 1) is index for current instruction 'n'
             Node *m = n;
             for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
-              m = block->_nodes[i];
+              m = block->get_node(i);
             }
             if (m == single_use) {
               lrgs(r)._area = 0.0;
@@ -772,20 +772,20 @@
 
     // Compute high-pressure index; avoid landing in the middle of projnodes
     j = hrp_index[0];
-    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
-      Node* cur = block->_nodes[j];
+    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+      Node* cur = block->get_node(j);
       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
         j--;
-        cur = block->_nodes[j];
+        cur = block->get_node(j);
       }
     }
     block->_ihrp_index = j;
     j = hrp_index[1];
-    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
-      Node* cur = block->_nodes[j];
+    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+      Node* cur = block->get_node(j);
       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
         j--;
-        cur = block->_nodes[j];
+        cur = block->get_node(j);
       }
     }
     block->_fhrp_index = j;
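
A worked example of the CreateEx fix above, since the index arithmetic is easy to misread (block contents hypothetical):

    // before:  [Region, SpillCopy, SpillCopy, CreateEx, ...]   first_inst == 1
    //          insidx skips the SpillCopies and finds CreateEx at index 3
    // block->remove_node(3);      // take CreateEx out of slot 3
    // block->insert_node(ex, 1);  // re-insert it right after the block head
    // after:   [Region, CreateEx, SpillCopy, SpillCopy, ...]
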
--- a/hotspot/src/share/vm/opto/lcm.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -58,14 +58,14 @@
 // The proj is the control projection for the not-null case.
 // The val is the pointer being checked for nullness, or the
 // decodeHeapOop_not_null node if it did not fold into the address.
-void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
+void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
   // Assume that if a null check is needed for a 0 offset, it is always needed.
   // Intel Solaris doesn't support any null checks yet, and no
   // mechanism exists (yet) to set the switches at an os_cpu level.
   if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
 
   // Make sure the ptr-is-null path appears to be uncommon!
-  float f = end()->as_MachIf()->_prob;
+  float f = block->end()->as_MachIf()->_prob;
   if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
   if( f > PROB_UNLIKELY_MAG(4) ) return;
 
@@ -75,13 +75,13 @@
   // Get the successor block for the case where the test ptr is non-null
   Block* not_null_block;  // this one goes with the proj
   Block* null_block;
-  if (_nodes[_nodes.size()-1] == proj) {
-    null_block     = _succs[0];
-    not_null_block = _succs[1];
+  if (block->get_node(block->number_of_nodes()-1) == proj) {
+    null_block     = block->_succs[0];
+    not_null_block = block->_succs[1];
   } else {
-    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
-    not_null_block = _succs[0];
-    null_block     = _succs[1];
+    assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
+    not_null_block = block->_succs[0];
+    null_block     = block->_succs[1];
   }
   while (null_block->is_Empty() == Block::empty_with_goto) {
     null_block     = null_block->_succs[0];
@@ -93,8 +93,8 @@
   // detect failure of this optimization, as in 6366351.)
   {
     bool found_trap = false;
-    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
-      Node* nn = null_block->_nodes[i1];
+    for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
+      Node* nn = null_block->get_node(i1);
       if (nn->is_MachCall() &&
           nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
         const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
@@ -237,20 +237,20 @@
     }
 
     // Check ctrl input to see if the null-check dominates the memory op
-    Block *cb = cfg->get_block_for_node(mach);
+    Block *cb = get_block_for_node(mach);
     cb = cb->_idom;             // Always hoist at least 1 block
     if( !was_store ) {          // Stores can be hoisted only one block
-      while( cb->_dom_depth > (_dom_depth + 1))
+      while( cb->_dom_depth > (block->_dom_depth + 1))
         cb = cb->_idom;         // Hoist loads as far as we want
       // The non-null-block should dominate the memory op, too. Live
       // range spilling will insert a spill in the non-null-block if it
       // needs to spill the memory op for an implicit null check.
-      if (cb->_dom_depth == (_dom_depth + 1)) {
+      if (cb->_dom_depth == (block->_dom_depth + 1)) {
         if (cb != not_null_block) continue;
         cb = cb->_idom;
       }
     }
-    if( cb != this ) continue;
+    if( cb != block ) continue;
 
     // Found a memory user; see if it can be hoisted to check-block
     uint vidx = 0;              // Capture index of value into memop
@@ -262,8 +262,8 @@
         if( is_decoden ) continue;
       }
       // Block of memory-op input
-      Block *inb = cfg->get_block_for_node(mach->in(j));
-      Block *b = this;          // Start from nul check
+      Block *inb = get_block_for_node(mach->in(j));
+      Block *b = block;          // Start from null check
       while( b != inb && b->_dom_depth > inb->_dom_depth )
         b = b->_idom;           // search upwards for input
       // See if input dominates null check
@@ -272,28 +272,28 @@
     }
     if( j > 0 )
       continue;
-    Block *mb = cfg->get_block_for_node(mach);
+    Block *mb = get_block_for_node(mach);
     // Hoisting stores requires more checks for the anti-dependence case.
     // Give up hoisting if we have to move the store past any load.
     if( was_store ) {
       Block *b = mb;            // Start searching here for a local load
       // 'mach' is the (faulting) use we are trying to hoist;
       // 'n' might be a blocker to hoisting.
-      while( b != this ) {
+      while( b != block ) {
         uint k;
-        for( k = 1; k < b->_nodes.size(); k++ ) {
-          Node *n = b->_nodes[k];
+        for( k = 1; k < b->number_of_nodes(); k++ ) {
+          Node *n = b->get_node(k);
           if( n->needs_anti_dependence_check() &&
               n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
             break;              // Found anti-dependent load
         }
-        if( k < b->_nodes.size() )
+        if( k < b->number_of_nodes() )
           break;                // Found anti-dependent load
         // Make sure control does not do a merge (would have to check allpaths)
         if( b->num_preds() != 2 ) break;
-        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
+        b = get_block_for_node(b->pred(1)); // Move up to predecessor block
       }
-      if( b != this ) continue;
+      if( b != block ) continue;
     }
 
     // Make sure this memory op is not already being used for a NullCheck
@@ -303,7 +303,7 @@
 
     // Found a candidate!  Pick one with least dom depth - the highest
     // in the dom tree should be closest to the null check.
-    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
+    if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
       best = mach;
       bidx = vidx;
     }
@@ -319,46 +319,45 @@
 
   if( is_decoden ) {
     // Check if we need to hoist decodeHeapOop_not_null first.
-    Block *valb = cfg->get_block_for_node(val);
-    if( this != valb && this->_dom_depth < valb->_dom_depth ) {
+    Block *valb = get_block_for_node(val);
+    if( block != valb && block->_dom_depth < valb->_dom_depth ) {
       // Hoist it up to the end of the test block.
       valb->find_remove(val);
-      this->add_inst(val);
-      cfg->map_node_to_block(val, this);
+      block->add_inst(val);
+      map_node_to_block(val, block);
       // DecodeN on x86 may kill flags. Check for flag-killing projections
       // that also need to be hoisted.
       for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
         Node* n = val->fast_out(j);
         if( n->is_MachProj() ) {
-          cfg->get_block_for_node(n)->find_remove(n);
-          this->add_inst(n);
-          cfg->map_node_to_block(n, this);
+          get_block_for_node(n)->find_remove(n);
+          block->add_inst(n);
+          map_node_to_block(n, block);
         }
       }
     }
   }
   // Hoist the memory candidate up to the end of the test block.
-  Block *old_block = cfg->get_block_for_node(best);
+  Block *old_block = get_block_for_node(best);
   old_block->find_remove(best);
-  add_inst(best);
-  cfg->map_node_to_block(best, this);
+  block->add_inst(best);
+  map_node_to_block(best, block);
 
   // Move the control dependence
-  if (best->in(0) && best->in(0) == old_block->_nodes[0])
-    best->set_req(0, _nodes[0]);
+  if (best->in(0) && best->in(0) == old_block->head())
+    best->set_req(0, block->head());
 
   // Check for flag-killing projections that also need to be hoisted
   // Should be DU safe because no edge updates.
   for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
     Node* n = best->fast_out(j);
     if( n->is_MachProj() ) {
-      cfg->get_block_for_node(n)->find_remove(n);
-      add_inst(n);
-      cfg->map_node_to_block(n, this);
+      get_block_for_node(n)->find_remove(n);
+      block->add_inst(n);
+      map_node_to_block(n, block);
     }
   }
 
-  Compile *C = cfg->C;
   // proj==Op_True --> ne test; proj==Op_False --> eq test.
   // One of two graph shapes got matched:
   //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
@@ -368,10 +367,10 @@
   // We need to flip the projections to keep the same semantics.
   if( proj->Opcode() == Op_IfTrue ) {
     // Swap order of projections in basic block to swap branch targets
-    Node *tmp1 = _nodes[end_idx()+1];
-    Node *tmp2 = _nodes[end_idx()+2];
-    _nodes.map(end_idx()+1, tmp2);
-    _nodes.map(end_idx()+2, tmp1);
+    Node *tmp1 = block->get_node(block->end_idx()+1);
+    Node *tmp2 = block->get_node(block->end_idx()+2);
+    block->map_node(tmp2, block->end_idx()+1);
+    block->map_node(tmp1, block->end_idx()+2);
     Node *tmp = new (C) Node(C->top()); // Use not NULL input
     tmp1->replace_by(tmp);
     tmp2->replace_by(tmp1);
@@ -384,8 +383,8 @@
   // it as well.
   Node *old_tst = proj->in(0);
   MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
-  _nodes.map(end_idx(),nul_chk);
-  cfg->map_node_to_block(nul_chk, this);
+  block->map_node(nul_chk, block->end_idx());
+  map_node_to_block(nul_chk, block);
   // Redirect users of old_test to nul_chk
   for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
     old_tst->last_out(i2)->set_req(0, nul_chk);
@@ -393,8 +392,8 @@
   for (uint i3 = 0; i3 < old_tst->req(); i3++)
     old_tst->set_req(i3, NULL);
 
-  cfg->latency_from_uses(nul_chk);
-  cfg->latency_from_uses(best);
+  latency_from_uses(nul_chk);
+  latency_from_uses(best);
 }
 
 
@@ -408,7 +407,7 @@
 // remaining cases (most), choose the instruction with the greatest latency
 // (that is, the greatest number of pseudo-cycles required to the end of the
 // routine). If there is a tie, choose the instruction with the most inputs.
-Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
+Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
 
   // If only a single entry on the stack, use it
   uint cnt = worklist.size();
@@ -442,7 +441,7 @@
     }
 
     // Final call in a block must be adjacent to 'catch'
-    Node *e = end();
+    Node *e = block->end();
     if( e->is_Catch() && e->in(0)->in(0) == n )
       continue;
 
@@ -468,7 +467,7 @@
         Node* use = n->fast_out(j);
 
         // The use is a conditional branch, make them adjacent
-        if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
+        if (use->is_MachIf() && get_block_for_node(use) == block) {
           found_machif = true;
           break;
         }
@@ -501,7 +500,7 @@
       n_choice = 1;
     }
 
-    uint n_latency = cfg->get_latency_for_node(n);
+    uint n_latency = get_latency_for_node(n);
     uint n_score   = n->req();   // Many inputs get high score to break ties
 
     // Keep best latency found
@@ -529,13 +528,13 @@
 
 
 //------------------------------set_next_call----------------------------------
-void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
+void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
   if( next_call.test_set(n->_idx) ) return;
   for( uint i=0; i<n->len(); i++ ) {
     Node *m = n->in(i);
     if( !m ) continue;  // must see all nodes in block that precede call
-    if (cfg->get_block_for_node(m) == this) {
-      set_next_call(m, next_call, cfg);
+    if (get_block_for_node(m) == block) {
+      set_next_call(block, m, next_call);
     }
   }
 }
@@ -546,24 +545,26 @@
 // next subroutine call get priority - basically it moves things NOT needed
 // for the next call till after the call.  This prevents me from trying to
 // carry lots of stuff live across a call.
-void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
+void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
   // Find the next control-defining Node in this block
   Node* call = NULL;
   for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
     Node* m = this_call->fast_out(i);
-    if(cfg->get_block_for_node(m) == this && // Local-block user
+    if (get_block_for_node(m) == block && // Local-block user
         m != this_call &&       // Not self-start node
-        m->is_MachCall() )
+        m->is_MachCall()) {
       call = m;
       break;
+    }
   }
   if (call == NULL)  return;    // No next call (e.g., block end is near)
   // Set next-call for all inputs to this call
-  set_next_call(call, next_call, cfg);
+  set_next_call(block, call, next_call);
 }
 
 //------------------------------add_call_kills-------------------------------------
-void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
+// Helper function that adds caller-save registers to a MachProjNode
+static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
   // Fill in the kill mask for the call
   for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
     if( !regs.Member(r) ) {     // Not already defined by the call
@@ -579,7 +580,7 @@
 
 
 //------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
   RegMask regs;
 
   // Schedule all the users of the call right now.  All the users are
@@ -592,18 +593,18 @@
     ready_cnt.at_put(n->_idx, n_cnt);
     assert( n_cnt == 0, "" );
     // Schedule next to call
-    _nodes.map(node_cnt++, n);
+    block->map_node(n, node_cnt++);
     // Collect defined registers
     regs.OR(n->out_RegMask());
     // Check for scheduling the next control-definer
     if( n->bottom_type() == Type::CONTROL )
       // Warm up next pile of heuristic bits
-      needed_for_next_call(n, next_call, cfg);
+      needed_for_next_call(block, n, next_call);
 
     // Children of projections are now all ready
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j); // Get user
-      if(cfg->get_block_for_node(m) != this) {
+      if(get_block_for_node(m) != block) {
         continue;
       }
       if( m->is_Phi() ) continue;
@@ -617,14 +618,14 @@
 
   // Act as if the call defines the Frame Pointer.
   // Certainly the FP is alive and well after the call.
-  regs.Insert(matcher.c_frame_pointer());
+  regs.Insert(_matcher.c_frame_pointer());
 
   // Set all registers killed and not already defined by the call.
   uint r_cnt = mcall->tf()->range()->cnt();
   int op = mcall->ideal_Opcode();
-  MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
-  cfg->map_node_to_block(proj, this);
-  _nodes.insert(node_cnt++, proj);
+  MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
+  map_node_to_block(proj, block);
+  block->insert_node(proj, node_cnt++);
 
   // Select the right register save policy.
   const char * save_policy;
@@ -633,13 +634,13 @@
     case Op_CallLeaf:
     case Op_CallLeafNoFP:
       // Calling C code so use C calling convention
-      save_policy = matcher._c_reg_save_policy;
+      save_policy = _matcher._c_reg_save_policy;
       break;
 
     case Op_CallStaticJava:
     case Op_CallDynamicJava:
       // Calling Java code so use Java calling convention
-      save_policy = matcher._register_save_policy;
+      save_policy = _matcher._register_save_policy;
       break;
 
     default:
@@ -674,44 +675,46 @@
 
 //------------------------------schedule_local---------------------------------
 // Topological sort within a block.  Someday this will become a real scheduler.
-bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
+bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
   // Already "sorted" are the block start Node (as the first entry), and
   // the block-ending Node and any trailing control projections.  We leave
   // these alone.  PhiNodes and ParmNodes are made to follow the block start
   // Node.  Everything else gets topo-sorted.
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
-      tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
-      for (uint i = 0;i < _nodes.size();i++) {
+    if (trace_opto_pipelining()) {
+      tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
+      for (uint i = 0; i < block->number_of_nodes(); i++) {
         tty->print("# ");
-        _nodes[i]->fast_dump();
+        block->get_node(i)->fast_dump();
       }
       tty->print_cr("#");
     }
 #endif
 
   // RootNode is already sorted
-  if( _nodes.size() == 1 ) return true;
+  if (block->number_of_nodes() == 1) {
+    return true;
+  }
 
   // Move PhiNodes and ParmNodes from 1 to cnt up to the start
-  uint node_cnt = end_idx();
+  uint node_cnt = block->end_idx();
   uint phi_cnt = 1;
   uint i;
   for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
-    Node *n = _nodes[i];
+    Node *n = block->get_node(i);
     if( n->is_Phi() ||          // Found a PhiNode or ParmNode
-        (n->is_Proj()  && n->in(0) == head()) ) {
+        (n->is_Proj()  && n->in(0) == block->head()) ) {
       // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
-      _nodes.map(i,_nodes[phi_cnt]);
-      _nodes.map(phi_cnt++,n);  // swap Phi/Parm up front
+      block->map_node(block->get_node(phi_cnt), i);
+      block->map_node(n, phi_cnt++);  // swap Phi/Parm up front
     } else {                    // All others
       // Count block-local inputs to 'n'
       uint cnt = n->len();      // Input count
       uint local = 0;
       for( uint j=0; j<cnt; j++ ) {
         Node *m = n->in(j);
-        if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
+        if( m && get_block_for_node(m) == block && !m->is_top() )
           local++;              // One more block-local input
       }
       ready_cnt.at_put(n->_idx, local); // Count em up
@@ -723,7 +726,7 @@
           for (uint prec = n->req(); prec < n->len(); prec++) {
             Node* oop_store = n->in(prec);
             if (oop_store != NULL) {
-              assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+              assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
             }
           }
         }
@@ -747,16 +750,16 @@
       }
     }
   }
-  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
-    ready_cnt.at_put(_nodes[i2]->_idx, 0);
+  for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
+    ready_cnt.at_put(block->get_node(i2)->_idx, 0);
 
   // All the prescheduled guys do not hold back internal nodes
   uint i3;
   for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
-    Node *n = _nodes[i3];       // Get pre-scheduled
+    Node *n = block->get_node(i3);       // Get pre-scheduled
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j);
-      if (cfg->get_block_for_node(m) == this) { // Local-block user
+      if (get_block_for_node(m) == block) { // Local-block user
         int m_cnt = ready_cnt.at(m->_idx)-1;
         ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
       }
@@ -767,7 +770,7 @@
   // Make a worklist
   Node_List worklist;
   for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
-    Node *m = _nodes[i4];
+    Node *m = block->get_node(i4);
     if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
       if (m->is_iteratively_computed()) {
         // Push induction variable increments last to allow other uses
@@ -789,15 +792,15 @@
   }
 
   // Warm up the 'next_call' heuristic bits
-  needed_for_next_call(_nodes[0], next_call, cfg);
+  needed_for_next_call(block, block->head(), next_call);
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
-      for (uint j=0; j<_nodes.size(); j++) {
-        Node     *n = _nodes[j];
+    if (trace_opto_pipelining()) {
+      for (uint j=0; j< block->number_of_nodes(); j++) {
+        Node     *n = block->get_node(j);
         int     idx = n->_idx;
         tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
-        tty->print("latency:%3d  ", cfg->get_latency_for_node(n));
+        tty->print("latency:%3d  ", get_latency_for_node(n));
         tty->print("%4d: %s\n", idx, n->Name());
       }
     }
@@ -808,7 +811,7 @@
   while( worklist.size() ) {    // Worklist is not empty
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
+    if (trace_opto_pipelining()) {
       tty->print("#   ready list:");
       for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
         Node *n = worklist[i];      // Get Node on worklist
@@ -819,13 +822,13 @@
 #endif
 
     // Select and pop a ready guy from worklist
-    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
-    _nodes.map(phi_cnt++,n);    // Schedule him next
+    Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
+    block->map_node(n, phi_cnt++);    // Schedule him next
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
+    if (trace_opto_pipelining()) {
       tty->print("#    select %d: %s", n->_idx, n->Name());
-      tty->print(", latency:%d", cfg->get_latency_for_node(n));
+      tty->print(", latency:%d", get_latency_for_node(n));
       n->dump();
       if (Verbose) {
         tty->print("#   ready list:");
@@ -840,26 +843,26 @@
 #endif
     if( n->is_MachCall() ) {
       MachCallNode *mcall = n->as_MachCall();
-      phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
+      phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
       continue;
     }
 
     if (n->is_Mach() && n->as_Mach()->has_call()) {
       RegMask regs;
-      regs.Insert(matcher.c_frame_pointer());
+      regs.Insert(_matcher.c_frame_pointer());
       regs.OR(n->out_RegMask());
 
-      MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
-      cfg->map_node_to_block(proj, this);
-      _nodes.insert(phi_cnt++, proj);
+      MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
+      map_node_to_block(proj, block);
+      block->insert_node(proj, phi_cnt++);
 
-      add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
+      add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
     }
 
     // Children are now all ready
     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
       Node* m = n->fast_out(i5); // Get user
-      if (cfg->get_block_for_node(m) != this) {
+      if (get_block_for_node(m) != block) {
         continue;
       }
       if( m->is_Phi() ) continue;
@@ -874,9 +877,8 @@
     }
   }
 
-  if( phi_cnt != end_idx() ) {
+  if( phi_cnt != block->end_idx() ) {
     // did not schedule all.  Retry, Bailout, or Die
-    Compile* C = matcher.C;
     if (C->subsume_loads() == true && !C->failing()) {
       // Retry with subsume_loads == false
       // If this is the first failure, the sentinel string will "stick"
@@ -888,12 +890,12 @@
   }
 
 #ifndef PRODUCT
-  if (cfg->trace_opto_pipelining()) {
+  if (trace_opto_pipelining()) {
     tty->print_cr("#");
     tty->print_cr("# after schedule_local");
-    for (uint i = 0;i < _nodes.size();i++) {
+    for (uint i = 0; i < block->number_of_nodes(); i++) {
       tty->print("# ");
-      _nodes[i]->fast_dump();
+      block->get_node(i)->fast_dump();
     }
     tty->cr();
   }
@@ -919,7 +921,7 @@
 }
 
 //------------------------------catch_cleanup_find_cloned_def------------------
-static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
+Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
   assert( use_blk != def_blk, "Inter-block cleanup only");
 
   // The use is in some block below the Catch.  Find and return the clone of the def
@@ -945,14 +947,14 @@
     // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
     Node_Array inputs = new Node_List(Thread::current()->resource_area());
     for(uint k = 1; k < use_blk->num_preds(); k++) {
-      Block* block = cfg->get_block_for_node(use_blk->pred(k));
-      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
+      Block* block = get_block_for_node(use_blk->pred(k));
+      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
     }
 
     // Check to see if the use_blk already has an identical phi inserted.
     // If it exists, it will be at the first position since all uses of a
     // def are processed together.
-    Node *phi = use_blk->_nodes[1];
+    Node *phi = use_blk->get_node(1);
     if( phi->is_Phi() ) {
       fixup = phi;
       for (uint k = 1; k < use_blk->num_preds(); k++) {
@@ -967,8 +969,8 @@
     // If an existing PhiNode was not found, make a new one.
     if (fixup == NULL) {
       Node *new_phi = PhiNode::make(use_blk->head(), def);
-      use_blk->_nodes.insert(1, new_phi);
-      cfg->map_node_to_block(new_phi, use_blk);
+      use_blk->insert_node(new_phi, 1);
+      map_node_to_block(new_phi, use_blk);
       for (uint k = 1; k < use_blk->num_preds(); k++) {
         new_phi->set_req(k, inputs[k]);
       }
@@ -977,7 +979,7 @@
 
   } else {
     // Found the use just below the Catch.  Make it use the clone.
-    fixup = use_blk->_nodes[n_clone_idx];
+    fixup = use_blk->get_node(n_clone_idx);
   }
 
   return fixup;
@@ -997,36 +999,36 @@
   for( uint k = 0; k < blk->_num_succs; k++ ) {
     // Get clone in each successor block
     Block *sb = blk->_succs[k];
-    Node *clone = sb->_nodes[offset_idx+1];
+    Node *clone = sb->get_node(offset_idx+1);
     assert( clone->Opcode() == use->Opcode(), "" );
 
     // Make use-clone reference the def-clone
-    catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
+    catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
   }
 }
 
 //------------------------------catch_cleanup_inter_block---------------------
 // Fix all input edges in use that reference "def".  The use is in a different
 // block than the def.
-static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
+void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
   if( !use_blk ) return;        // Can happen if the use is a precedence edge
 
-  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
+  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
   catch_cleanup_fix_all_inputs(use, def, new_def);
 }
 
 //------------------------------call_catch_cleanup-----------------------------
 // If we inserted any instructions between a Call and its CatchNode,
 // clone the instructions on all paths below the Catch.
-void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
+void PhaseCFG::call_catch_cleanup(Block* block) {
 
   // End of region to clone
-  uint end = end_idx();
-  if( !_nodes[end]->is_Catch() ) return;
+  uint end = block->end_idx();
+  if( !block->get_node(end)->is_Catch() ) return;
   // Start of region to clone
   uint beg = end;
-  while(!_nodes[beg-1]->is_MachProj() ||
-        !_nodes[beg-1]->in(0)->is_MachCall() ) {
+  while(!block->get_node(beg-1)->is_MachProj() ||
+        !block->get_node(beg-1)->in(0)->is_MachCall() ) {
     beg--;
     assert(beg > 0,"Catch cleanup walking beyond block boundary");
   }
@@ -1035,15 +1037,15 @@
 
   // Clone along all Catch output paths.  Clone area between the 'beg' and
   // 'end' indices.
-  for( uint i = 0; i < _num_succs; i++ ) {
-    Block *sb = _succs[i];
+  for( uint i = 0; i < block->_num_succs; i++ ) {
+    Block *sb = block->_succs[i];
     // Clone the entire area; ignoring the edge fixup for now.
     for( uint j = end; j > beg; j-- ) {
       // It is safe here to clone a node with anti_dependence
       // since clones dominate on each path.
-      Node *clone = _nodes[j-1]->clone();
-      sb->_nodes.insert( 1, clone );
-      cfg->map_node_to_block(clone, sb);
+      Node *clone = block->get_node(j-1)->clone();
+      sb->insert_node(clone, 1);
+      map_node_to_block(clone, sb);
     }
   }
 
@@ -1051,7 +1053,7 @@
   // Fixup edges.  Check the def-use info per cloned Node
   for(uint i2 = beg; i2 < end; i2++ ) {
     uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
-    Node *n = _nodes[i2];        // Node that got cloned
+    Node *n = block->get_node(i2);        // Node that got cloned
     // Need DU safe iterator because of edge manipulation in calls.
     Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
     for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
@@ -1060,19 +1062,19 @@
     uint max = out->size();
     for (uint j = 0; j < max; j++) {// For all users
       Node *use = out->pop();
-      Block *buse = cfg->get_block_for_node(use);
+      Block *buse = get_block_for_node(use);
       if( use->is_Phi() ) {
         for( uint k = 1; k < use->req(); k++ )
           if( use->in(k) == n ) {
-            Block* block = cfg->get_block_for_node(buse->pred(k));
-            Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
+            Block* b = get_block_for_node(buse->pred(k));
+            Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
             use->set_req(k, fixup);
           }
       } else {
-        if (this == buse) {
-          catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
+        if (block == buse) {
+          catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
         } else {
-          catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
+          catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
         }
       }
     } // End for all users
@@ -1081,30 +1083,30 @@
 
   // Remove the now-dead cloned ops
   for(uint i3 = beg; i3 < end; i3++ ) {
-    _nodes[beg]->disconnect_inputs(NULL, C);
-    _nodes.remove(beg);
+    block->get_node(beg)->disconnect_inputs(NULL, C);
+    block->remove_node(beg);
   }
 
   // If the successor blocks have a CreateEx node, move it back to the top
-  for(uint i4 = 0; i4 < _num_succs; i4++ ) {
-    Block *sb = _succs[i4];
+  for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
+    Block *sb = block->_succs[i4];
     uint new_cnt = end - beg;
     // Remove any newly created, but dead, nodes.
     for( uint j = new_cnt; j > 0; j-- ) {
-      Node *n = sb->_nodes[j];
+      Node *n = sb->get_node(j);
       if (n->outcnt() == 0 &&
           (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
         n->disconnect_inputs(NULL, C);
-        sb->_nodes.remove(j);
+        sb->remove_node(j);
         new_cnt--;
       }
     }
     // If any newly created nodes remain, move the CreateEx node to the top
     if (new_cnt > 0) {
-      Node *cex = sb->_nodes[1+new_cnt];
+      Node *cex = sb->get_node(1+new_cnt);
       if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
-        sb->_nodes.remove(1+new_cnt);
-        sb->_nodes.insert(1,cex);
+        sb->remove_node(1+new_cnt);
+        sb->insert_node(cex, 1);
       }
     }
   }
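
All of the lcm.cpp scheduling entry points above follow one refactoring pattern: former Block member functions become PhaseCFG members that take the block as an explicit parameter, so they can use _matcher, C, get_block_for_node() and latency_from_uses() as plain members instead of threading cfg and matcher pointers through every call. In sketch form (signatures as in the hunks, bodies elided):

    // before: the block must be handed the CFG and matcher it doesn't own
    bool Block::schedule_local(PhaseCFG* cfg, Matcher& matcher,
                               GrowableArray<int>& ready_cnt, VectorSet& next_call);
    // after: the scheduling pass owns that state and is handed the block
    bool PhaseCFG::schedule_local(Block* block,
                                  GrowableArray<int>& ready_cnt, VectorSet& next_call);
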
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -2756,10 +2756,28 @@
       newval = _gvn.makecon(TypePtr::NULL_PTR);
 
     // Reference stores need a store barrier.
-    pre_barrier(true /* do_load*/,
-                control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
-                NULL /* pre_val*/,
-                T_OBJECT);
+    if (kind == LS_xchg) {
+      // If the pre-barrier must execute before the oop store, the old value requires a do_load here.
+      if (!can_move_pre_barrier()) {
+        pre_barrier(true /* do_load*/,
+                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+                    NULL /* pre_val*/,
+                    T_OBJECT);
+      } // Else move the pre_barrier to use the load_store value; see below.
+    } else if (kind == LS_cmpxchg) {
+      // Same as for newval above:
+      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
+        oldval = _gvn.makecon(TypePtr::NULL_PTR);
+      }
+      // The only known value which might get overwritten is oldval.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  oldval /* pre_val */,
+                  T_OBJECT);
+    } else {
+      ShouldNotReachHere();
+    }
+
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@@ -2795,16 +2813,27 @@
   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
   set_memory(proj, alias_idx);
 
+  if (type == T_OBJECT && kind == LS_xchg) {
+#ifdef _LP64
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
+    }
+#endif
+    if (can_move_pre_barrier()) {
+      // Don't need to load pre_val. The old value is returned by load_store.
+      // The pre_barrier can execute after the xchg as long as no safepoint
+      // gets inserted between them.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  load_store /* pre_val */,
+                  T_OBJECT);
+    }
+  }
+
   // Add the trailing membars surrounding the access
   insert_mem_bar(Op_MemBarCPUOrder);
   insert_mem_bar(Op_MemBarAcquire);
 
-#ifdef _LP64
-  if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
-    load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
-  }
-#endif
-
   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
   set_result(load_store);
   return true;
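
The net effect on the LS_xchg path: rather than reloading the old value just to feed the SATB pre-barrier, the value returned by the load_store node itself becomes pre_val, and the barrier is emitted after the access. A hedged before/after sketch of the resulting IR order (node names illustrative):

    // before (do_load == true):
    //   pre_val = LoadP(adr); g1_pre_barrier(pre_val); old = GetAndSetP(adr, newval)
    // after (can_move_pre_barrier() == true):
    //   old = GetAndSetP(adr, newval); g1_pre_barrier(old)
    // legal only because no safepoint can be scheduled between the two nodes
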
--- a/hotspot/src/share/vm/opto/live.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/live.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -85,8 +85,8 @@
     IndexSet* def = &_defs[block->_pre_order-1];
     DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
     uint i;
-    for (i = block->_nodes.size(); i > 1; i--) {
-      Node* n = block->_nodes[i-1];
+    for (i = block->number_of_nodes(); i > 1; i--) {
+      Node* n = block->get_node(i-1);
       if (n->is_Phi()) {
         break;
       }
@@ -112,7 +112,7 @@
 #endif
     // Remove anything defined by Phis and the block start instruction
     for (uint k = i; k > 0; k--) {
-      uint r = _names[block->_nodes[k - 1]->_idx];
+      uint r = _names[block->get_node(k - 1)->_idx];
       def->insert(r);
       use->remove(r);
     }
@@ -124,7 +124,7 @@
 
       // PhiNode uses go in the live-out set of prior blocks.
       for (uint k = i; k > 0; k--) {
-        add_liveout(p, _names[block->_nodes[k-1]->in(l)->_idx], first_pass);
+        add_liveout(p, _names[block->get_node(k-1)->in(l)->_idx], first_pass);
       }
     }
     freeset(block);
@@ -254,10 +254,10 @@
 void PhaseLive::dump( const Block *b ) const {
   tty->print("Block %d: ",b->_pre_order);
   tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
-  uint cnt = b->_nodes.size();
+  uint cnt = b->number_of_nodes();
   for( uint i=0; i<cnt; i++ ) {
-    tty->print("L%d/", _names[b->_nodes[i]->_idx] );
-    b->_nodes[i]->dump();
+    tty->print("L%d/", _names[b->get_node(i)->_idx] );
+    b->get_node(i)->dump();
   }
   tty->print("\n");
 }
@@ -269,7 +269,7 @@
   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
     Block* block = _cfg.get_block(i);
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j-1];
+      Node* n = block->get_node(j-1);
       if (n->is_Phi()) {
         break;
       }
--- a/hotspot/src/share/vm/opto/machnode.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/machnode.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@
 class MachOper : public ResourceObj {
 public:
   // Allocate right next to the MachNodes in the same arena
-  void *operator new( size_t x, Compile* C ) { return C->node_arena()->Amalloc_D(x); }
+  void *operator new( size_t x, Compile* C ) throw() { return C->node_arena()->Amalloc_D(x); }
 
   // Opcode
   virtual uint opcode() const = 0;
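
The throw() addition here (and in node.hpp below) is a conformance fix rather than a behavioral one: in C++98/03 an allocation function declared with an empty exception specification is the non-throwing form, meaning it signals failure by returning NULL, and the generated new-expression then tests the result before running the constructor instead of assuming success. A minimal standalone illustration (not HotSpot code):

    #include <cstddef>
    struct Arena { void* alloc(size_t n); };
    struct Obj {
      // Non-throwing placement-style new: may return NULL, so the
      // compiler emits a NULL check before constructing the Obj.
      void* operator new(size_t x, Arena* a) throw() { return a->alloc(x); }
    };
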
--- a/hotspot/src/share/vm/opto/macro.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -72,6 +72,8 @@
   int jvms_adj  = new_dbg_start - old_dbg_start;
   assert (new_dbg_start == newcall->req(), "argument count mismatch");
 
+  // A SafePointScalarObject node could be referenced several times in the debug info.
+  // Use a Dict to record cloned nodes.
   Dict* sosn_map = new Dict(cmpkey,hashkey);
   for (uint i = old_dbg_start; i < oldcall->req(); i++) {
     Node* old_in = oldcall->in(i);
@@ -79,8 +81,8 @@
     if (old_in != NULL && old_in->is_SafePointScalarObject()) {
       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
       uint old_unique = C->unique();
-      Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
-      if (old_unique != C->unique()) {
+      Node* new_in = old_sosn->clone(sosn_map);
+      if (old_unique != C->unique()) { // New node?
         new_in->set_req(0, C->root()); // reset control edge
         new_in = transform_later(new_in); // Register new node.
       }
@@ -725,7 +727,11 @@
   while (safepoints.length() > 0) {
     SafePointNode* sfpt = safepoints.pop();
     Node* mem = sfpt->memory();
-    uint first_ind = sfpt->req();
+    assert(sfpt->jvms() != NULL, "missed JVMS");
+    // Fields of scalar objects are referenced only at the end
+    // of the regular debug info, in the last (youngest) JVMS.
+    // Record the start index relative to scloff.
+    uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
     SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
 #ifdef ASSERT
                                                  alloc,
@@ -799,7 +805,7 @@
           for (int i = start; i < end; i++) {
             if (sfpt_done->in(i)->is_SafePointScalarObject()) {
               SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
-              if (scobj->first_index() == sfpt_done->req() &&
+              if (scobj->first_index(jvms) == sfpt_done->req() &&
                   scobj->n_fields() == (uint)nfields) {
                 assert(scobj->alloc() == alloc, "sanity");
                 sfpt_done->set_req(i, res);
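
Because scalarized-object fields always sit at the tail of the youngest JVMS, storing first_index relative to scloff keeps the recorded position meaningful even when the debug info is laid out against a different JVMS. A sketch of the presumed accessor plus a worked example (numbers invented; the real accessor lives in opto/callnode.hpp):

    // presumed shape of the new accessor:
    //   uint first_index(JVMState* jvms) const { return jvms->scloff() + _first_index; }
    // example: sfpt->req() == 24 and sfpt->jvms()->scloff() == 20
    //   => stored _first_index == 24 - 20 == 4
    //   => a caller recovers the absolute input slot as its own scloff() + 4
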
--- a/hotspot/src/share/vm/opto/node.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/node.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -773,6 +773,21 @@
   _in[_cnt] = NULL;       // NULL out emptied slot
 }
 
+//------------------------------del_req_ordered--------------------------------
+// Delete the required edge and compact the edge array, preserving order
+void Node::del_req_ordered( uint idx ) {
+  assert( idx < _cnt, "oob");
+  assert( !VerifyHashTableKeys || _hash_lock == 0,
+          "remove node from hash table before modifying it");
+  // First remove corresponding def-use edge
+  Node *n = in(idx);
+  if (n != NULL) n->del_out((Node *)this);
+  if (idx < _cnt - 1) { // Not the last edge?
+    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
+  }
+  _in[--_cnt] = NULL;   // NULL out emptied slot
+}
+
 //------------------------------ins_req----------------------------------------
 // Insert a new required input at the end
 void Node::ins_req( uint idx, Node *n ) {
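
For contrast: the existing del_req() fills the hole by swapping in the last input, which is O(1) but permutes edge order; del_req_ordered() shifts the tail down so relative input positions survive. The difference on inputs [A, B, C, D] when deleting index 1:

    //   del_req(1):          [A, D, C]   // last edge dropped into the hole
    //   del_req_ordered(1):  [A, C, D]   // tail shifted left, order preserved
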
--- a/hotspot/src/share/vm/opto/node.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -211,7 +211,7 @@
 
   // New operator that takes a Compile pointer; this will eventually
   // be the "new" New operator.
-  inline void* operator new( size_t x, Compile* C) {
+  inline void* operator new( size_t x, Compile* C) throw() {
     Node* n = (Node*)C->node_arena()->Amalloc_D(x);
 #ifdef ASSERT
     n->_in = (Node**)n; // magic cookie for assertion check
@@ -384,6 +384,7 @@
   void add_req( Node *n ); // Append a NEW required input
   void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
   void del_req( uint idx ); // Delete required edge & compact
+  void del_req_ordered( uint idx ); // Delete required edge & compact, preserving order
   void ins_req( uint i, Node *n ); // Insert a NEW required input
   void set_req( uint i, Node *n ) {
     assert( is_not_dead(n), "can not use dead node");
--- a/hotspot/src/share/vm/opto/output.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -57,7 +57,7 @@
 // Convert Nodes to instruction bits and pass off to the VM
 void Compile::Output() {
   // RootNode goes
-  assert( _cfg->get_root_block()->_nodes.size() == 0, "" );
+  assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
 
   // The number of new nodes (mostly MachNop) is proportional to
   // the number of java calls and inner loops which are aligned.
@@ -70,11 +70,11 @@
   Block *entry = _cfg->get_block(1);
   Block *broot = _cfg->get_root_block();
 
-  const StartNode *start = entry->_nodes[0]->as_Start();
+  const StartNode *start = entry->head()->as_Start();
 
   // Replace StartNode with prolog
   MachPrologNode *prolog = new (this) MachPrologNode();
-  entry->_nodes.map( 0, prolog );
+  entry->map_node(prolog, 0);
   _cfg->map_node_to_block(prolog, entry);
   _cfg->unmap_node_from_block(start); // start is no longer in any block
 
@@ -144,8 +144,8 @@
     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
       tty->print("\nBB#%03d:\n", i);
       Block* block = _cfg->get_block(i);
-      for (uint j = 0; j < block->_nodes.size(); j++) {
-        Node* n = block->_nodes[j];
+      for (uint j = 0; j < block->number_of_nodes(); j++) {
+        Node* n = block->get_node(j);
         OptoReg::Name reg = _regalloc->get_reg_first(n);
         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
         n->dump();
@@ -226,8 +226,8 @@
   // Insert call to zap runtime stub before every node with an oop map
   for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
     Block *b = _cfg->get_block(i);
-    for ( uint j = 0;  j < b->_nodes.size();  ++j ) {
-      Node *n = b->_nodes[j];
+    for ( uint j = 0;  j < b->number_of_nodes();  ++j ) {
+      Node *n = b->get_node(j);
 
       // Determine whether we should insert a zap-a-lot node in the output.
       // We do that for all nodes that have oopmap info, except for calls
@@ -256,7 +256,7 @@
         }
         if (insert) {
           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
-          b->_nodes.insert( j, zap );
+          b->insert_node(zap, j);
           _cfg->map_node_to_block(zap, b);
           ++j;
         }
@@ -379,10 +379,10 @@
     DEBUG_ONLY( jmp_rule[i]   = 0; )
 
     // Sum all instruction sizes to compute block size
-    uint last_inst = block->_nodes.size();
+    uint last_inst = block->number_of_nodes();
     uint blk_size = 0;
     for (uint j = 0; j < last_inst; j++) {
-      Node* nj = block->_nodes[j];
+      Node* nj = block->get_node(j);
       // Handle machine instruction nodes
       if (nj->is_Mach()) {
         MachNode *mach = nj->as_Mach();
@@ -477,18 +477,18 @@
     for (uint i = 0; i < nblocks; i++) {
       Block* block = _cfg->get_block(i);
       int idx = jmp_nidx[i];
-      MachNode* mach = (idx == -1) ? NULL: block->_nodes[idx]->as_Mach();
+      MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
       if (mach != NULL && mach->may_be_short_branch()) {
 #ifdef ASSERT
         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
         int j;
         // Find the branch; ignore trailing NOPs.
-        for (j = block->_nodes.size()-1; j>=0; j--) {
-          Node* n = block->_nodes[j];
+        for (j = block->number_of_nodes()-1; j>=0; j--) {
+          Node* n = block->get_node(j);
           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
             break;
         }
-        assert(j >= 0 && j == idx && block->_nodes[j] == (Node*)mach, "sanity");
+        assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
 #endif
         int br_size = jmp_size[i];
         int br_offs = blk_starts[i] + jmp_offset[i];
@@ -522,7 +522,7 @@
             diff -= nop_size;
           }
           adjust_block_start += diff;
-          block->_nodes.map(idx, replacement);
+          block->map_node(replacement, idx);
           mach->subsume_by(replacement, C);
           mach = replacement;
           progress = true;
@@ -639,7 +639,7 @@
                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
       Compile::set_sv_for_object_node(objs, sv);
 
-      uint first_ind = spobj->first_index();
+      uint first_ind = spobj->first_index(sfpt->jvms());
       for (uint i = 0; i < spobj->n_fields(); i++) {
         Node* fld_node = sfpt->in(first_ind+i);
         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
@@ -894,7 +894,7 @@
     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
 
     // Loop over monitors and insert into array
-    for(idx = 0; idx < num_mon; idx++) {
+    for (idx = 0; idx < num_mon; idx++) {
       // Grab the node that defines this monitor
       Node* box_node = sfn->monitor_box(jvms, idx);
       Node* obj_node = sfn->monitor_obj(jvms, idx);
@@ -902,11 +902,11 @@
       // Create ScopeValue for object
       ScopeValue *scval = NULL;
 
-      if( obj_node->is_SafePointScalarObject() ) {
+      if (obj_node->is_SafePointScalarObject()) {
         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
         scval = Compile::sv_for_node_id(objs, spobj->_idx);
         if (scval == NULL) {
-          const Type *t = obj_node->bottom_type();
+          const Type *t = spobj->bottom_type();
           ciKlass* cik = t->is_oopptr()->klass();
           assert(cik->is_instance_klass() ||
                  cik->is_array_klass(), "Not supported allocation.");
@@ -914,14 +914,14 @@
                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
           Compile::set_sv_for_object_node(objs, sv);
 
-          uint first_ind = spobj->first_index();
+          uint first_ind = spobj->first_index(youngest_jvms);
           for (uint i = 0; i < spobj->n_fields(); i++) {
             Node* fld_node = sfn->in(first_ind+i);
             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
           }
           scval = sv;
         }
-      } else if( !obj_node->is_Con() ) {
+      } else if (!obj_node->is_Con()) {
         OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
         if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
           scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
@@ -1088,8 +1088,8 @@
     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
       Block* b = _cfg->get_block(i);
 
-      for (uint j = 0; j < b->_nodes.size(); j++) {
-        Node* n = b->_nodes[j];
+      for (uint j = 0; j < b->number_of_nodes(); j++) {
+        Node* n = b->get_node(j);
 
         // If the node is a MachConstantNode evaluate the constant
         // value section.
@@ -1247,14 +1247,14 @@
     // Define the label at the beginning of the basic block
     MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
 
-    uint last_inst = block->_nodes.size();
+    uint last_inst = block->number_of_nodes();
 
     // Emit block normally, except for last instruction.
     // Emit means "dump code bits into code buffer".
     for (uint j = 0; j<last_inst; j++) {
 
       // Get the node
-      Node* n = block->_nodes[j];
+      Node* n = block->get_node(j);
 
       // See if delay slots are supported
       if (valid_bundle_info(n) &&
@@ -1308,7 +1308,7 @@
           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
           int nops_cnt = padding / nop_size;
           MachNode *nop = new (this) MachNopNode(nops_cnt);
-          block->_nodes.insert(j++, nop);
+          block->insert_node(nop, j++);
           last_inst++;
           _cfg->map_node_to_block(nop, block);
           nop->emit(*cb, _regalloc);
@@ -1394,7 +1394,7 @@
               // Insert padding between avoid_back_to_back branches.
               if (needs_padding && replacement->avoid_back_to_back()) {
                 MachNode *nop = new (this) MachNopNode();
-                block->_nodes.insert(j++, nop);
+                block->insert_node(nop, j++);
                 _cfg->map_node_to_block(nop, block);
                 last_inst++;
                 nop->emit(*cb, _regalloc);
@@ -1407,7 +1407,7 @@
               jmp_size[i]   = new_size;
               jmp_rule[i]   = mach->rule();
 #endif
-              block->_nodes.map(j, replacement);
+              block->map_node(replacement, j);
               mach->subsume_by(replacement, C);
               n    = replacement;
               mach = replacement;
@@ -1438,7 +1438,7 @@
             count++;
             uint i4;
             for (i4 = 0; i4 < last_inst; ++i4) {
-              if (block->_nodes[i4] == oop_store) {
+              if (block->get_node(i4) == oop_store) {
                 break;
               }
             }
@@ -1548,7 +1548,7 @@
       int padding = nb->alignment_padding(current_offset);
       if( padding > 0 ) {
         MachNode *nop = new (this) MachNopNode(padding / nop_size);
-        block->_nodes.insert(block->_nodes.size(), nop);
+        block->insert_node(nop, block->number_of_nodes());
         _cfg->map_node_to_block(nop, block);
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
@@ -1655,8 +1655,8 @@
     int j;
 
     // Find the branch; ignore trailing NOPs.
-    for (j = block->_nodes.size() - 1; j >= 0; j--) {
-      n = block->_nodes[j];
+    for (j = block->number_of_nodes() - 1; j >= 0; j--) {
+      n = block->get_node(j);
       if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
         break;
       }
@@ -1675,8 +1675,8 @@
       uint call_return = call_returns[block->_pre_order];
 #ifdef ASSERT
       assert( call_return > 0, "no call seen for this basic block" );
-      while (block->_nodes[--j]->is_MachProj()) ;
-      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+      while (block->get_node(--j)->is_MachProj()) ;
+      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
 #endif
      // last instruction is a CatchNode, find its CatchProjNodes
       int nof_succs = block->_num_succs;
@@ -1782,7 +1782,7 @@
   // Get the last node
   Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
 
-  _next_node = block->_nodes[block->_nodes.size() - 1];
+  _next_node = block->get_node(block->number_of_nodes() - 1);
 }
 
 #ifndef PRODUCT
@@ -1875,7 +1875,7 @@
     // Used to allow latency 0 to force an instruction to the beginning
     // of the bb
     uint latency = 1;
-    Node *use = bb->_nodes[j];
+    Node *use = bb->get_node(j);
     uint nlen = use->len();
 
     // Walk over all the inputs
@@ -2286,7 +2286,7 @@
        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
 
     // Push any trailing projections
-    if( bb->_nodes[bb->_nodes.size()-1] != n ) {
+    if( bb->get_node(bb->number_of_nodes()-1) != n ) {
       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
         Node *foi = n->fast_out(i);
         if( foi->is_Proj() )
@@ -2329,21 +2329,21 @@
   _unconditional_delay_slot = NULL;
 
 #ifdef ASSERT
-  for( uint i=0; i < bb->_nodes.size(); i++ )
-    assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
+  for( uint i=0; i < bb->number_of_nodes(); i++ )
+    assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
 #endif
 
  // Force the _uses count to never go to zero for unschedulable pieces
   // of the block
   for( uint k = 0; k < _bb_start; k++ )
-    _uses[bb->_nodes[k]->_idx] = 1;
-  for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
-    _uses[bb->_nodes[l]->_idx] = 1;
+    _uses[bb->get_node(k)->_idx] = 1;
+  for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
+    _uses[bb->get_node(l)->_idx] = 1;
 
   // Iterate backwards over the instructions in the block.  Don't count the
   // branch projections at end or the block header instructions.
   for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
-    Node *n = bb->_nodes[j];
+    Node *n = bb->get_node(j);
     if( n->is_Proj() ) continue; // Projections handled another way
 
     // Account for all uses
@@ -2398,8 +2398,8 @@
 #ifndef PRODUCT
     if (_cfg->C->trace_opto_output()) {
       tty->print("#  Schedule BB#%03d (initial)\n", i);
-      for (uint j = 0; j < bb->_nodes.size(); j++) {
-        bb->_nodes[j]->dump();
+      for (uint j = 0; j < bb->number_of_nodes(); j++) {
+        bb->get_node(j)->dump();
       }
     }
 #endif
@@ -2426,10 +2426,10 @@
     }
 
     // Leave untouched the starting instruction, any Phis, a CreateEx node
-    // or Top.  bb->_nodes[_bb_start] is the first schedulable instruction.
-    _bb_end = bb->_nodes.size()-1;
+    // or Top.  bb->get_node(_bb_start) is the first schedulable instruction.
+    _bb_end = bb->number_of_nodes()-1;
     for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
-      Node *n = bb->_nodes[_bb_start];
+      Node *n = bb->get_node(_bb_start);
       // Things not matched, like Phinodes and ProjNodes don't get scheduled.
       // Also, MachIdealNodes do not get scheduled
       if( !n->is_Mach() ) continue;     // Skip non-machine nodes
@@ -2449,19 +2449,19 @@
     // in the block), because they have delay slots we can fill.  Calls all
     // have their delay slots filled in the template expansions, so we don't
     // bother scheduling them.
-    Node *last = bb->_nodes[_bb_end];
+    Node *last = bb->get_node(_bb_end);
     // Ignore trailing NOPs.
     while (_bb_end > 0 && last->is_Mach() &&
            last->as_Mach()->ideal_Opcode() == Op_Con) {
-      last = bb->_nodes[--_bb_end];
+      last = bb->get_node(--_bb_end);
     }
     assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
     if( last->is_Catch() ||
        // Exclude unreachable path case when Halt node is in a separate block.
        (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
       // There must be a prior call.  Skip it.
-      while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
-        assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
+      while( !bb->get_node(--_bb_end)->is_MachCall() ) {
+        assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
       }
     } else if( last->is_MachNullCheck() ) {
       // Backup so the last null-checked memory instruction is
@@ -2470,7 +2470,7 @@
       Node *mem = last->in(1);
       do {
         _bb_end--;
-      } while (mem != bb->_nodes[_bb_end]);
+      } while (mem != bb->get_node(_bb_end));
     } else {
       // Set _bb_end to point after last schedulable inst.
       _bb_end++;
@@ -2499,7 +2499,7 @@
     assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
 #ifdef ASSERT
     for( uint l = _bb_start; l < _bb_end; l++ ) {
-      Node *n = bb->_nodes[l];
+      Node *n = bb->get_node(l);
       uint m;
       for( m = 0; m < _bb_end-_bb_start; m++ )
         if( _scheduled[m] == n )
@@ -2510,14 +2510,14 @@
 
     // Now copy the instructions (in reverse order) back to the block
     for ( uint k = _bb_start; k < _bb_end; k++ )
-      bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
+      bb->map_node(_scheduled[_bb_end-k-1], k);
 
 #ifndef PRODUCT
     if (_cfg->C->trace_opto_output()) {
       tty->print("#  Schedule BB#%03d (final)\n", i);
       uint current = 0;
-      for (uint j = 0; j < bb->_nodes.size(); j++) {
-        Node *n = bb->_nodes[j];
+      for (uint j = 0; j < bb->number_of_nodes(); j++) {
+        Node *n = bb->get_node(j);
         if( valid_bundle_info(n) ) {
           Bundle *bundle = node_bundling(n);
           if (bundle->instr_count() > 0 || bundle->flags() > 0) {
@@ -2579,8 +2579,8 @@
   // Walk over the block backwards.  Check to make sure each DEF doesn't
   // kill a live value (other than the one it's supposed to).  Add each
   // USE to the live set.
-  for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
-    Node *n = b->_nodes[i];
+  for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
+    Node *n = b->get_node(i);
     int n_op = n->Opcode();
     if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
@@ -2711,7 +2711,7 @@
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
       // Insert the pinch-point in the block just after the last use
-      b->_nodes.insert(b->find_node(use)+1,pinch);
+      b->insert_node(pinch, b->find_node(use) + 1);
       _bb_end++;                // Increase size scheduled region in block
     }
 
@@ -2763,10 +2763,10 @@
   // it being in the current block.
   bool fat_proj_seen = false;
   uint last_safept = _bb_end-1;
-  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
+  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
   Node* last_safept_node = end_node;
   for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
     if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
@@ -2815,7 +2815,7 @@
     // Do not allow defs of new derived values to float above GC
     // points unless the base is definitely available at the GC point.
 
-    Node *m = b->_nodes[i];
+    Node *m = b->get_node(i);
 
     // Add precedence edge from following safepoint to use of derived pointer
     if( last_safept_node != end_node &&
@@ -2832,11 +2832,11 @@
 
     if( n->jvms() ) {           // Precedence edge from derived to safept
       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
-      if( b->_nodes[last_safept] != last_safept_node ) {
+      if( b->get_node(last_safept) != last_safept_node ) {
         last_safept = b->find_node(last_safept_node);
       }
       for( uint j=last_safept; j > i; j-- ) {
-        Node *mach = b->_nodes[j];
+        Node *mach = b->get_node(j);
         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
           mach->add_prec( n );
       }
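
The opto hunks above are one mechanical refactoring: every direct access to Block's _nodes list (_nodes[i], _nodes.size(), _nodes.insert, _nodes.map, _nodes.remove) is routed through the new accessors get_node, number_of_nodes, insert_node, map_node, and remove_node, hiding the underlying list behind the Block interface. A minimal sketch of that accessor layer, with a std::vector standing in for the real Node_List (the actual Block class in block.hpp carries much more state than this):

    // Sketch only: the encapsulation pattern behind the renamings above.
    #include <vector>
    #include <cassert>

    class Node;

    class Block {
     private:
      std::vector<Node*> _nodes;  // clients no longer touch this directly

     public:
      unsigned number_of_nodes() const { return (unsigned) _nodes.size(); }

      Node* get_node(unsigned i) const {
        assert(i < _nodes.size() && "index out of range");
        return _nodes[i];
      }

      // Overwrite slot i in place (replaces _nodes.map(i, n)).
      void map_node(Node* n, unsigned i) { _nodes[i] = n; }

      // Insert n at position i, shifting later nodes (replaces _nodes.insert(i, n)).
      void insert_node(Node* n, unsigned i) {
        _nodes.insert(_nodes.begin() + i, n);
      }

      // Remove the node at position i (replaces _nodes.remove(i)).
      void remove_node(unsigned i) { _nodes.erase(_nodes.begin() + i); }
    };

Note the argument order: the new mutators take the node first and the index second, the reverse of the old Node_List calls, which is why every call site in these hunks swaps its arguments.
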
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1648,10 +1648,10 @@
     bool block_not_printed = true;
 
     // and each instruction within a block
-    uint end_index = block->_nodes.size();
+    uint end_index = block->number_of_nodes();
     // block->end_idx() not valid after PhaseRegAlloc
     for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) {
-      Node     *n = block->_nodes.at(instruction_index);
+      Node     *n = block->get_node(instruction_index);
       if( n->is_Mach() ) {
         MachNode *m = n->as_Mach();
         int deleted_count = 0;
@@ -1673,7 +1673,7 @@
             }
             // Print instructions being deleted
             for( int i = (deleted_count - 1); i >= 0; --i ) {
-              block->_nodes.at(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
+              block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
             }
             tty->print_cr("replaced with");
             // Print new instruction
@@ -1687,11 +1687,11 @@
           //  the node index to live range mappings.)
           uint safe_instruction_index = (instruction_index - deleted_count);
           for( ; (instruction_index > safe_instruction_index); --instruction_index ) {
-            block->_nodes.remove( instruction_index );
+            block->remove_node( instruction_index );
           }
           // install new node after safe_instruction_index
-          block->_nodes.insert( safe_instruction_index + 1, m2 );
-          end_index = block->_nodes.size() - 1; // Recompute new block size
+          block->insert_node(m2, safe_instruction_index + 1);
+          end_index = block->number_of_nodes() - 1; // Recompute new block size
           NOT_PRODUCT( inc_peepholes(); )
         }
       }
--- a/hotspot/src/share/vm/opto/postaloc.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/postaloc.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -423,8 +423,8 @@
 
     // Count of Phis in block
     uint phi_dex;
-    for (phi_dex = 1; phi_dex < block->_nodes.size(); phi_dex++) {
-      Node* phi = block->_nodes[phi_dex];
+    for (phi_dex = 1; phi_dex < block->number_of_nodes(); phi_dex++) {
+      Node* phi = block->get_node(phi_dex);
       if (!phi->is_Phi()) {
         break;
       }
@@ -439,7 +439,7 @@
       Block* pb = _cfg.get_block_for_node(block->pred(j));
       // Remove copies along phi edges
       for (uint k = 1; k < phi_dex; k++) {
-        elide_copy(block->_nodes[k], j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
+        elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
       }
       if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
         // See if this predecessor's mappings have been used by everybody
@@ -510,7 +510,7 @@
     // For all Phi's
     for (j = 1; j < phi_dex; j++) {
       uint k;
-      Node *phi = block->_nodes[j];
+      Node *phi = block->get_node(j);
       uint pidx = _lrg_map.live_range_id(phi);
       OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
 
@@ -522,7 +522,7 @@
           u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
       }
       if (u != NodeSentinel) {    // Junk Phi.  Remove
-        block->_nodes.remove(j--);
+        block->remove_node(j--);
         phi_dex--;
         _cfg.unmap_node_from_block(phi);
         phi->replace_by(u);
@@ -552,8 +552,8 @@
     }
 
     // For all remaining instructions
-    for (j = phi_dex; j < block->_nodes.size(); j++) {
-      Node* n = block->_nodes[j];
+    for (j = phi_dex; j < block->number_of_nodes(); j++) {
+      Node* n = block->get_node(j);
 
       if(n->outcnt() == 0 &&   // Dead?
          n != C->top() &&      // (ignore TOP, it has no du info)
--- a/hotspot/src/share/vm/opto/reg_split.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/reg_split.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -112,17 +112,17 @@
 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
   // Skip intervening ProjNodes.  Do not insert between a ProjNode and
   // its definer.
-  while( i < b->_nodes.size() &&
-         (b->_nodes[i]->is_Proj() ||
-          b->_nodes[i]->is_Phi() ) )
+  while( i < b->number_of_nodes() &&
+         (b->get_node(i)->is_Proj() ||
+          b->get_node(i)->is_Phi() ) )
     i++;
 
   // Do not insert between a call and its Catch
-  if( b->_nodes[i]->is_Catch() ) {
+  if( b->get_node(i)->is_Catch() ) {
     // Put the instruction at the top of the fall-thru block.
     // Find the fall-thru projection
     while( 1 ) {
-      const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
+      const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
       if( cp->_con == CatchProjNode::fall_through_index )
         break;
     }
@@ -131,7 +131,7 @@
     i = 1;                      // Right at start of block
   }
 
-  b->_nodes.insert(i,spill);    // Insert node in block
+  b->insert_node(spill, i);    // Insert node in block
   _cfg.map_node_to_block(spill,  b); // Update node->block mapping to reflect
   // Adjust the point where we go hi-pressure
   if( i <= b->_ihrp_index ) b->_ihrp_index++;
@@ -160,9 +160,9 @@
   // (The implicit_null_check function ensures the use is also dominated
   // by the branch-not-taken block.)
   Node *be = b->end();
-  if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
+  if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
     // Spill goes in the branch-not-taken block
-    b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue];
+    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
     loc = 0;                    // Just past the Region
   }
   assert( loc >= 0, "must insert past block head" );
@@ -450,7 +450,7 @@
 
   // Scan block for 1st use.
   for( uint i = 1; i <= b->end_idx(); i++ ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Ignore PHI use, these can be up or down
     if (n->is_Phi()) {
       continue;
@@ -647,7 +647,7 @@
 
       // check block for appropriate phinode & update edges
       for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
-        n1 = b->_nodes[insidx];
+        n1 = b->get_node(insidx);
         // bail if this is not a phi
         phi = n1->is_Phi() ? n1->as_Phi() : NULL;
         if( phi == NULL ) {
@@ -747,7 +747,7 @@
     //----------Walk Instructions in the Block and Split----------
     // For all non-phi instructions in the block
     for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
-      Node *n = b->_nodes[insidx];
+      Node *n = b->get_node(insidx);
       // Find the defining Node's live range index
       uint defidx = _lrg_map.find_id(n);
       uint cnt = n->req();
@@ -776,7 +776,7 @@
               assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
               n->replace_by(u); // Then replace with unique input
               n->disconnect_inputs(NULL, C);
-              b->_nodes.remove(insidx);
+              b->remove_node(insidx);
               insidx--;
               b->_ihrp_index--;
               b->_fhrp_index--;
@@ -789,12 +789,12 @@
               (b->_reg_pressure < (uint)INTPRESSURE) ||
               b->_ihrp_index > 4000000 ||
               b->_ihrp_index >= b->end_idx() ||
-              !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
+              !b->get_node(b->_ihrp_index)->is_Proj(), "" );
       assert( insidx > b->_fhrp_index ||
               (b->_freg_pressure < (uint)FLOATPRESSURE) ||
               b->_fhrp_index > 4000000 ||
               b->_fhrp_index >= b->end_idx() ||
-              !b->_nodes[b->_fhrp_index]->is_Proj(), "" );
+              !b->get_node(b->_fhrp_index)->is_Proj(), "" );
 
       // ********** Handle Crossing HRP Boundary **********
       if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
@@ -819,7 +819,7 @@
                 // Insert point is just past last use or def in the block
                 int insert_point = insidx-1;
                 while( insert_point > 0 ) {
-                  Node *n = b->_nodes[insert_point];
+                  Node *n = b->get_node(insert_point);
                   // Hit top of block?  Quit going backwards
                   if (n->is_Phi()) {
                     break;
@@ -865,7 +865,7 @@
             }
           }  // end if LRG is UP
         }  // end for all spilling live ranges
-        assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
+        assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
       }  // end if crossing HRP Boundary
 
       // If the LRG index is oob, then this is a new spillcopy, skip it.
@@ -878,7 +878,7 @@
       if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
         n->replace_by( n->in(copyidx) );
         n->set_req( copyidx, NULL );
-        b->_nodes.remove(insidx--);
+        b->remove_node(insidx--);
         b->_ihrp_index--; // Adjust the point where we go hi-pressure
         b->_fhrp_index--;
         continue;
@@ -932,10 +932,10 @@
             // Rematerializable?  Then clone def at use site instead
             // of store/load
             if( def->rematerialize() ) {
-              int old_size = b->_nodes.size();
+              int old_size = b->number_of_nodes();
               def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
               if( !def ) return 0; // Bail out
-              insidx += b->_nodes.size()-old_size;
+              insidx += b->number_of_nodes()-old_size;
             }
 
             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
@@ -1332,8 +1332,8 @@
         // so look at the node before it.
         int insert = pred->end_idx();
         while (insert >= 1 &&
-               pred->_nodes[insert - 1]->is_SpillCopy() &&
-               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
+               pred->get_node(insert - 1)->is_SpillCopy() &&
+               _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
           insert--;
         }
         def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
@@ -1402,7 +1402,7 @@
   for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
     b  = _cfg.get_block(bidx);
     for (insidx = 0; insidx <= b->end_idx(); insidx++) {
-      Node *n = b->_nodes[insidx];
+      Node *n = b->get_node(insidx);
       uint defidx = _lrg_map.find(n);
       assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
       assert(defidx < maxlrg,"Bad live range index in Split");
--- a/hotspot/src/share/vm/opto/type.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/opto/type.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -169,7 +169,7 @@
 
 public:
 
-  inline void* operator new( size_t x ) {
+  inline void* operator new( size_t x ) throw() {
     Compile* compile = Compile::current();
     compile->set_type_last_size(x);
     void *temp = compile->type_arena()->Amalloc_D(x);
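
This throw() annotation recurs throughout the runtime and services hunks below (HandleMark, ObjectMonitor, ParkEvent, Thread, MemRecorder, MemTrackWorker, Array, ProfilerNode). The rationale: in C++, a non-placement operator new with no exception specification is presumed to throw on failure rather than return NULL, so a conforming compiler may drop NULL checks after a new-expression; HotSpot's custom allocators can return NULL, so they must advertise themselves as non-throwing. A self-contained sketch of the distinction (not HotSpot code):

    // Sketch: why a NULL-returning operator new should be declared throw().
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct Widget {
      int payload;
      // Declared throw(): the new-expression below is then allowed to
      // yield NULL, and the caller's NULL check is meaningful. Without
      // the specification, returning NULL here would be undefined
      // behavior and the check could legally be optimized away.
      void* operator new(std::size_t size) throw() {
        return std::malloc(size);  // may legitimately fail and return NULL
      }
      void operator delete(void* p) { std::free(p); }
    };

    int main() {
      Widget* w = new Widget;  // no construction happens if allocation fails
      if (w == NULL) {
        std::fprintf(stderr, "allocation failed\n");
        return 1;
      }
      delete w;
      return 0;
    }
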
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1605,17 +1605,6 @@
   return result;
 }
 
-void Arguments::set_heap_base_min_address() {
-  if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) {
-    // By default HeapBaseMinAddress is 2G on all platforms except Solaris x86.
-    // G1 currently needs a lot of C-heap, so on Solaris we have to give G1
-    // some extra space for the C-heap compared to other collectors.
-    // Use FLAG_SET_DEFAULT here rather than FLAG_SET_ERGO to make sure that
-    // code that checks for default values work correctly.
-    FLAG_SET_DEFAULT(HeapBaseMinAddress, 1*G);
-  }
-}
-
 void Arguments::set_heap_size() {
   if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
     // Deprecated flag
@@ -2230,7 +2219,7 @@
   // among the distinct pages.
   if (ContendedPaddingWidth < 0 || ContendedPaddingWidth > 8192) {
     jio_fprintf(defaultStream::error_stream(),
-                "ContendedPaddingWidth=" INTX_FORMAT " must be the between %d and %d\n",
+                "ContendedPaddingWidth=" INTX_FORMAT " must be in between %d and %d\n",
                 ContendedPaddingWidth, 0, 8192);
     status = false;
   }
@@ -2239,7 +2228,7 @@
   // It is sufficient to check against the largest type size.
   if ((ContendedPaddingWidth % BytesPerLong) != 0) {
     jio_fprintf(defaultStream::error_stream(),
-                "ContendedPaddingWidth=" INTX_FORMAT " must be the multiple of %d\n",
+                "ContendedPaddingWidth=" INTX_FORMAT " must be a multiple of %d\n",
                 ContendedPaddingWidth, BytesPerLong);
     status = false;
   }
@@ -3537,8 +3526,6 @@
     }
   }
 
-  set_heap_base_min_address();
-
   // Set heap size based on available physical memory
   set_heap_size();
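
The two ContendedPaddingWidth checks above reject values outside [0, 8192] and values that are not long-aligned. A standalone sketch of the same validation logic, outside the VM (BytesPerLong is 8 on HotSpot targets; the 8192 bound is taken from the diff):

    // Sketch: standalone version of the ContendedPaddingWidth validation.
    #include <cstdio>

    static const long BytesPerLong = 8;  // largest primitive field type, in bytes

    bool contended_padding_width_ok(long width) {
      // Bounds taken from the check above.
      if (width < 0 || width > 8192) {
        std::fprintf(stderr,
            "ContendedPaddingWidth=%ld must be between %d and %d\n",
            width, 0, 8192);
        return false;
      }
      // Must be a multiple of the largest type size so every field
      // type stays naturally aligned within the padded region.
      if (width % BytesPerLong != 0) {
        std::fprintf(stderr,
            "ContendedPaddingWidth=%ld must be a multiple of %ld\n",
            width, BytesPerLong);
        return false;
      }
      return true;
    }
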
 
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -334,8 +334,6 @@
   // limits the given memory size by the maximum amount of memory this process is
   // currently allowed to allocate or reserve.
   static julong limit_by_allocatable_memory(julong size);
-  // Setup HeapBaseMinAddress
-  static void set_heap_base_min_address();
   // Setup heap size
   static void set_heap_size();
   // Based on automatic selection criteria, should the
--- a/hotspot/src/share/vm/runtime/fprofiler.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/fprofiler.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -264,7 +264,7 @@
 
  public:
 
-  void* operator new(size_t size, ThreadProfiler* tp);
+  void* operator new(size_t size, ThreadProfiler* tp) throw();
   void  operator delete(void* p);
 
   ProfilerNode() {
@@ -373,7 +373,7 @@
   }
 };
 
-void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp){
+void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() {
   void* result = (void*) tp->area_top;
   tp->area_top += size;
 
@@ -925,6 +925,8 @@
       FlatProfiler::interval_print();
       FlatProfiler::interval_reset();
     }
+
+    FREE_C_HEAP_ARRAY(JavaThread *, threadsList, mtInternal);
   } else {
     // Couldn't get the threads lock, just record that rather than blocking
     FlatProfiler::threads_lock_ticks += 1;
--- a/hotspot/src/share/vm/runtime/globals.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -205,6 +205,7 @@
 
 #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
 #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
+#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
@@ -260,7 +261,7 @@
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
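
The C1_DIAGNOSTIC_FLAG_STRUCT addition (and the matching _MEMBER and _MEMBER_WITH_TYPE expanders in globals_extension.hpp below) extends HotSpot's X-macro flag tables: C1_FLAGS is a single master list that each consumer expands with its own set of per-kind macros, so introducing a new flag kind means passing one more expander at every C1_FLAGS call site, exactly as these hunks do. A toy version of the pattern, with illustrative names only:

    // Toy X-macro flag table; names are illustrative, not HotSpot's.
    #include <cstdio>

    #define MY_FLAGS(product, diagnostic)                        \
      product(bool, UseFastPath, true,  "use the fast path")     \
      diagnostic(int, TraceLevel, 0,    "tracing verbosity")

    // Expansion 1: a descriptor record per flag (cf. *_FLAG_STRUCT).
    struct FlagDesc { const char* type; const char* name; const char* doc; };
    #define PRODUCT_STRUCT(type, name, value, doc)    { #type, #name, doc " {product}" },
    #define DIAGNOSTIC_STRUCT(type, name, value, doc) { #type, #name, doc " {diagnostic}" },
    static const FlagDesc flagTable[] = {
      MY_FLAGS(PRODUCT_STRUCT, DIAGNOSTIC_STRUCT)
    };

    // Expansion 2: an enum member per flag (cf. FLAG_MEMBER).
    #define PRODUCT_MEMBER(type, name, value, doc)    Flag_##name,
    #define DIAGNOSTIC_MEMBER(type, name, value, doc) Flag_##name,
    enum FlagMember {
      MY_FLAGS(PRODUCT_MEMBER, DIAGNOSTIC_MEMBER)
      Flag_COUNT
    };

    int main() {
      for (int i = 0; i < Flag_COUNT; i++)
        std::printf("%-4s %-11s %s\n",
                    flagTable[i].type, flagTable[i].name, flagTable[i].doc);
      return 0;
    }
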
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -3514,6 +3514,8 @@
           "Temporary flag for transition to AbstractMethodError wrapped "   \
           "in InvocationTargetException. See 6531596")                      \
                                                                             \
+  develop(bool, VerifyLambdaBytecodes, false,                               \
+          "Force verification of jdk 8 lambda metafactory bytecodes.")      \
                                                                             \
   develop(intx, FastSuperclassLimit, 8,                                     \
           "Depth of hardwired instanceof accelerator array")                \
@@ -3685,15 +3687,9 @@
   develop(bool, TraceDefaultMethods, false,                                 \
           "Trace the default method processing steps")                      \
                                                                             \
-  develop(bool, ParseAllGenericSignatures, false,                           \
-          "Parse all generic signatures while classloading")                \
-                                                                            \
   develop(bool, VerifyGenericSignatures, false,                             \
           "Abort VM on erroneous or inconsistent generic signatures")       \
                                                                             \
-  product(bool, ParseGenericDefaults, false,                                \
-          "Parse generic signatures for default method handling")           \
-                                                                            \
   product(bool, UseVMInterruptibleIO, false,                                \
           "(Unstable, Solaris-specific) Thread interrupt before or with "   \
           "EINTR for I/O operations results in OS_INTRPT. The default value"\
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -57,6 +57,7 @@
 
 #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
+#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
@@ -99,7 +100,7 @@
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
@@ -131,6 +132,7 @@
 
 #define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
 #define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
@@ -204,6 +206,7 @@
           C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+          C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
           C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
 #ifdef COMPILER2
--- a/hotspot/src/share/vm/runtime/handles.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/handles.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -179,11 +179,11 @@
   _thread->set_last_handle_mark(previous_handle_mark());
 }
 
-void* HandleMark::operator new(size_t size) {
+void* HandleMark::operator new(size_t size) throw() {
   return AllocateHeap(size, mtThread);
 }
 
-void* HandleMark::operator new [] (size_t size) {
+void* HandleMark::operator new [] (size_t size) throw() {
   return AllocateHeap(size, mtThread);
 }
 
--- a/hotspot/src/share/vm/runtime/handles.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/handles.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -309,8 +309,8 @@
   // called in the destructor of HandleMarkCleaner
   void pop_and_restore();
   // overloaded operators
-  void* operator new(size_t size);
-  void* operator new [](size_t size);
+  void* operator new(size_t size) throw();
+  void* operator new [](size_t size) throw();
   void operator delete(void* p);
   void operator delete[](void* p);
 };
--- a/hotspot/src/share/vm/runtime/interfaceSupport.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@
   }
 
  private:
-  inline void* operator new(size_t size, void* ptr) {
+  inline void* operator new(size_t size, void* ptr) throw() {
     return ptr;
   }
 };
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -312,10 +312,10 @@
  public:
   static int Knob_Verbose;
   static int Knob_SpinLimit;
-  void* operator new (size_t size) {
+  void* operator new (size_t size) throw() {
     return AllocateHeap(size, mtInternal);
   }
-  void* operator new[] (size_t size) {
+  void* operator new[] (size_t size) throw() {
     return operator new (size);
   }
   void operator delete(void* p) {
--- a/hotspot/src/share/vm/runtime/os.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1485,44 +1485,6 @@
   return result;
 }
 
-// Read file line by line, if line is longer than bsize,
-// skip rest of line.
-int os::get_line_chars(int fd, char* buf, const size_t bsize){
-  size_t sz, i = 0;
-
-  // read until EOF, EOL or buf is full
-  while ((sz = (int) read(fd, &buf[i], 1)) == 1 && i < (bsize-2) && buf[i] != '\n') {
-     ++i;
-  }
-
-  if (buf[i] == '\n') {
-    // EOL reached so ignore EOL character and return
-
-    buf[i] = 0;
-    return (int) i;
-  }
-
-  buf[i+1] = 0;
-
-  if (sz != 1) {
-    // EOF reached. if we read chars before EOF return them and
-    // return EOF on next call otherwise return EOF
-
-    return (i == 0) ? -1 : (int) i;
-  }
-
-  // line is longer than size of buf, skip to EOL
-  char ch;
-  while (read(fd, &ch, 1) == 1 && ch != '\n') {
-    // Do nothing
-  }
-
-  // return initial part of line that fits in buf.
-  // If we reached EOF, it will be returned on next call.
-
-  return (int) i;
-}
-
 void os::SuspendedThreadTask::run() {
   assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
   internal_do_task();
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -738,10 +738,6 @@
   // Hook for os specific jvm options that we don't want to abort on seeing
   static bool obsolete_option(const JavaVMOption *option);
 
-  // Read file line by line. If line is longer than bsize,
-  // rest of line is skipped. Returns number of bytes read or -1 on EOF
-  static int get_line_chars(int fd, char *buf, const size_t bsize);
-
   // Extensions
 #include "runtime/os_ext.hpp"
 
--- a/hotspot/src/share/vm/runtime/park.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/park.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@
 // well as bank access imbalance on Niagara-like platforms,
 // although Niagara's hash function should help.
 
-void * ParkEvent::operator new (size_t sz) {
+void * ParkEvent::operator new (size_t sz) throw() {
   return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
 }
 
--- a/hotspot/src/share/vm/runtime/park.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/park.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -166,7 +166,7 @@
     // aligned on 256-byte address boundaries.  This ensures that the least
     // significant byte of a ParkEvent address is always 0.
 
-    void * operator new (size_t sz) ;
+    void * operator new (size_t sz) throw();
     void operator delete (void * a) ;
 
   public:
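
ParkEvent's allocator above over-allocates by 256 bytes and rounds the result up with (p + 256) & -256, which is what guarantees the 256-byte alignment the park.hpp comment describes (least significant address byte always zero). A standalone sketch of the arithmetic, with plain malloc standing in for AllocateHeap; a real version would also need to keep the raw pointer around in order to free the block later:

    // Sketch: round an allocation up to the next 256-byte boundary.
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    void* alloc_aligned_256(std::size_t sz) {
      // Over-allocate so the rounded-up pointer still has sz usable bytes.
      std::intptr_t raw = (std::intptr_t) std::malloc(sz + 256);
      if (raw == 0) return NULL;
      // Adding 256 then clearing the low 8 bits rounds up to a boundary
      // strictly above raw, somewhere inside the over-allocated block.
      return (void*) ((raw + 256) & -256);
    }

    int main() {
      void* p = alloc_aligned_256(64);
      std::printf("p = %p, p %% 256 = %ld\n", p, (long) ((std::intptr_t) p & 255));
      return 0;
    }
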
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1051,7 +1051,8 @@
 
   // Find receiver for non-static call
   if (bc != Bytecodes::_invokestatic &&
-      bc != Bytecodes::_invokedynamic) {
+      bc != Bytecodes::_invokedynamic &&
+      bc != Bytecodes::_invokehandle) {
     // This register map must be update since we need to find the receiver for
     // compiled frames. The receiver might be in a register.
     RegisterMap reg_map2(thread);
@@ -1078,7 +1079,7 @@
 
 #ifdef ASSERT
   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
-  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
+  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
     assert(receiver.not_null(), "should have thrown exception");
     KlassHandle receiver_klass(THREAD, receiver->klass());
     Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -1240,9 +1241,9 @@
 #endif
 
   if (is_virtual) {
-    assert(receiver.not_null(), "sanity check");
+    assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
-    KlassHandle h_klass(THREAD, receiver->klass());
+    KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                      is_optimized, static_bound, virtual_call_info,
                      CHECK_(methodHandle()));
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -3636,6 +3636,16 @@
   CompileBroker::compilation_init();
 #endif
 
+  if (EnableInvokeDynamic) {
+    // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
+    // It is done after compilers are initialized, because otherwise compilations of
+    // signature polymorphic MH intrinsics can be missed
+    // (see SystemDictionary::find_method_handle_intrinsic).
+    initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
+    initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
+    initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
+  }
+
 #if INCLUDE_MANAGEMENT
   Management::initialize(THREAD);
 #endif // INCLUDE_MANAGEMENT
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -113,8 +113,9 @@
   // Support for forcing alignment of thread objects for biased locking
   void*       _real_malloc_address;
  public:
-  void* operator new(size_t size) { return allocate(size, true); }
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) { return allocate(size, false); }
+  void* operator new(size_t size) throw() { return allocate(size, true); }
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
+    return allocate(size, false); }
   void  operator delete(void* p);
 
  protected:
--- a/hotspot/src/share/vm/services/memRecorder.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/services/memRecorder.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -53,13 +53,13 @@
     }
   }
 
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
     // the instance is part of memRecorder and needs to be tagged with 'otNMTRecorder'
     // to avoid recursion
     return os::malloc(size, (mtNMT | otNMTRecorder));
   }
 
-  void* operator new(size_t size) {
+  void* operator new(size_t size) throw() {
     assert(false, "use nothrow version");
     return NULL;
   }
--- a/hotspot/src/share/vm/services/memTrackWorker.cpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/services/memTrackWorker.cpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,12 +63,12 @@
   }
 }
 
-void* MemTrackWorker::operator new(size_t size) {
+void* MemTrackWorker::operator new(size_t size) throw() {
   assert(false, "use nothrow version");
   return NULL;
 }
 
-void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
   return allocate(size, false, mtNMT);
 }
 
--- a/hotspot/src/share/vm/services/memTrackWorker.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/services/memTrackWorker.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,8 +90,8 @@
  public:
   MemTrackWorker(MemSnapshot* snapshot);
   ~MemTrackWorker();
-  _NOINLINE_ void* operator new(size_t size);
-  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
+  _NOINLINE_ void* operator new(size_t size) throw();
+  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw();
 
   void start();
   void run();
--- a/hotspot/src/share/vm/utilities/array.hpp	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/src/share/vm/utilities/array.hpp	Fri Sep 06 11:04:00 2013 -0700
@@ -317,7 +317,7 @@
   Array(const Array<T>&);
   void operator=(const Array<T>&);
 
-  void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) {
+  void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) throw() {
     size_t word_size = Array::size(length);
     return (void*) Metaspace::allocate(loader_data, word_size, read_only,
                                        MetaspaceObj::array_type(sizeof(T)), CHECK_NULL);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/gcbarriers/G1CrashTest.java	Fri Sep 06 11:04:00 2013 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8023472
+ * @summary C2 optimization breaks with G1
+ *
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Dcount=100000 G1CrashTest
+ *
+ * @author pbiswal@palantir.com
+ */
+
+public class G1CrashTest {
+    static Object[] set = new Object[11];
+
+    public static void main(String[] args) throws InterruptedException {
+        for (int j = 0; j < Integer.getInteger("count"); j++) {
+            Object key = new Object();
+            insertKey(key);
+            if (j > set.length / 2) {
+                Object[] oldKeys = set;
+                set = new Object[2 * set.length - 1];
+                for (Object o : oldKeys) {
+                    if (o != null)
+                        insertKey(o);
+                }
+            }
+        }
+    }
+
+    static void insertKey(Object key) {
+        int hash = key.hashCode() & 0x7fffffff;
+        int index = hash % set.length;
+        Object cur = set[index];
+        if (cur == null)
+            set[index] = key;
+        else
+            insertKeyRehash(key, index, hash, cur);
+    }
+
+    static void insertKeyRehash(Object key, int index, int hash, Object cur) {
+        int loopIndex = index;
+        int firstRemoved = -1;
+        do {
+            if (cur == "dead")
+                firstRemoved = 1;
+            index--;
+            if (index < 0)
+                index += set.length;
+            cur = set[index];
+            if (cur == null) {
+                if (firstRemoved != -1)
+                    set[firstRemoved] = "dead";
+                else
+                    set[index] = key;
+                return;
+            }
+        } while (index != loopIndex);
+        if (firstRemoved != -1)
+            set[firstRemoved] = null;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jsr292/ConcurrentClassLoadingTest.java	Fri Sep 06 11:04:00 2013 -0700
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8022595
+ * @summary JSR292: deadlock during class loading of MethodHandles, MethodHandleImpl & MethodHandleNatives
+ *
+ * @run main/othervm ConcurrentClassLoadingTest
+ */
+import java.util.*;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+
+public class ConcurrentClassLoadingTest {
+    int numThreads = 0;
+    long seed = 0;
+    CyclicBarrier l;
+    Random rand;
+
+    public static void main(String[] args) throws Throwable {
+        ConcurrentClassLoadingTest test = new ConcurrentClassLoadingTest();
+        test.parseArgs(args);
+        test.run();
+    }
+
+    void parseArgs(String[] args) {
+        int i = 0;
+        while (i < args.length) {
+            String flag = args[i];
+            switch(flag) {
+                case "-seed":
+                    seed = Long.parseLong(args[++i]);
+                    break;
+                case "-numThreads":
+                    numThreads = Integer.parseInt(args[++i]);
+                    break;
+                default:
+                    throw new Error("Unknown flag: " + flag);
+            }
+            ++i;
+        }
+    }
+
+    void init() {
+        if (numThreads == 0) {
+            numThreads = Runtime.getRuntime().availableProcessors();
+        }
+
+        if (seed == 0) {
+            seed = (new Random()).nextLong();
+        }
+        rand = new Random(seed);
+
+        l = new CyclicBarrier(numThreads + 1);
+
+        System.out.printf("Threads: %d\n", numThreads);
+        System.out.printf("Seed: %d\n", seed);
+    }
+
+    final List<Loader> loaders = new ArrayList<>();
+
+    void prepare() {
+        List<String> c = new ArrayList<>(Arrays.asList(classNames));
+
+        // Split classes between loading threads
+        int count = (classNames.length / numThreads) + 1;
+        for (int t = 0; t < numThreads; t++) {
+            List<String> sel = new ArrayList<>();
+
+            System.out.printf("Thread #%d:\n", t);
+            for (int i = 0; i < count; i++) {
+                if (c.size() == 0) break;
+
+                int k = rand.nextInt(c.size());
+                String elem = c.remove(k);
+                sel.add(elem);
+                System.out.printf("\t%s\n", elem);
+            }
+            loaders.add(new Loader(sel));
+        }
+
+        // Print diagnostic info when the test hangs
+        Runtime.getRuntime().addShutdownHook(new Thread() {
+            public void run() {
+                boolean alive = false;
+                for (Loader l : loaders) {
+                    if (!l.isAlive())  continue;
+
+                    if (!alive) {
+                        System.out.println("Some threads are still alive:");
+                        alive = true;
+                    }
+
+                    System.out.println(l.getName());
+                    for (StackTraceElement elem : l.getStackTrace()) {
+                        System.out.println("\t"+elem.toString());
+                    }
+                }
+            }
+        });
+    }
+
+    public void run() throws Throwable {
+        init();
+        prepare();
+
+        for (Loader loader : loaders) {
+            loader.start();
+        }
+
+        l.await();
+
+        for (Loader loader : loaders) {
+            loader.join();
+        }
+    }
+
+    class Loader extends Thread {
+        List<String> classes;
+
+        public Loader(List<String> classes) {
+            this.classes = classes;
+            setDaemon(true);
+        }
+
+        @Override
+        public void run() {
+            try {
+                l.await();
+
+                for (String name : classes) {
+                    Class.forName(name).getName();
+                }
+            } catch (ClassNotFoundException | BrokenBarrierException | InterruptedException e) {
+                throw new Error(e);
+            }
+        }
+    }
+
+    final static String[] classNames = {
+            "java.lang.invoke.AbstractValidatingLambdaMetafactory",
+            "java.lang.invoke.BoundMethodHandle",
+            "java.lang.invoke.CallSite",
+            "java.lang.invoke.ConstantCallSite",
+            "java.lang.invoke.DirectMethodHandle",
+            "java.lang.invoke.InnerClassLambdaMetafactory",
+            "java.lang.invoke.InvokeDynamic",
+            "java.lang.invoke.InvokeGeneric",
+            "java.lang.invoke.InvokerBytecodeGenerator",
+            "java.lang.invoke.Invokers",
+            "java.lang.invoke.LambdaConversionException",
+            "java.lang.invoke.LambdaForm",
+            "java.lang.invoke.LambdaMetafactory",
+            "java.lang.invoke.MagicLambdaImpl",
+            "java.lang.invoke.MemberName",
+            "java.lang.invoke.MethodHandle",
+            "java.lang.invoke.MethodHandleImpl",
+            "java.lang.invoke.MethodHandleInfo",
+            "java.lang.invoke.MethodHandleNatives",
+            "java.lang.invoke.MethodHandleProxies",
+            "java.lang.invoke.MethodHandles",
+            "java.lang.invoke.MethodHandleStatics",
+            "java.lang.invoke.MethodType",
+            "java.lang.invoke.MethodTypeForm",
+            "java.lang.invoke.MutableCallSite",
+            "java.lang.invoke.SerializedLambda",
+            "java.lang.invoke.SimpleMethodHandle",
+            "java.lang.invoke.SwitchPoint",
+            "java.lang.invoke.TypeConvertingMethodAdapter",
+            "java.lang.invoke.VolatileCallSite",
+            "java.lang.invoke.WrongMethodTypeException"
+    };
+}
--- a/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Fri Sep 06 11:04:00 2013 -0700
@@ -51,9 +51,8 @@
         output.shouldHaveExitValue(0);
 
       } catch (RuntimeException e) {
-        // Report 'passed' if CDS was turned off because we could not allocate
-        // the klass metaspace at an address that would work with CDS.
-        output.shouldContain("Could not allocate metaspace at a compatible address");
+        // Report 'passed' if CDS was turned off.
+        output.shouldContain("Unable to use shared archive");
         output.shouldHaveExitValue(1);
       }
     }
--- a/hotspot/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Fri Sep 06 11:04:00 2013 -0700
@@ -69,7 +69,7 @@
                 "-server", "-Xshare:on", "-XX:+UnlockDiagnosticVMOptions",
                 "-XX:SharedArchiveFile=./sample.jsa", "-version");
             output = new OutputAnalyzer(pb.start());
-            output.shouldContain("Could not allocate metaspace at a compatible address");
+            output.shouldContain("Unable to use shared archive");
             output.shouldHaveExitValue(1);
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/InitialThreadOverflow/DoOverflow.java	Fri Sep 06 11:04:00 2013 -0700
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class DoOverflow {
+
+    static int count;
+
+    public void overflow() {
+        count+=1;
+        overflow();
+    }
+
+    public static void printIt() {
+        System.out.println("Going to overflow stack");
+        try {
+            new DoOverflow().overflow();
+        } catch(java.lang.StackOverflowError e) {
+            System.out.println("Overflow OK " + count);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/InitialThreadOverflow/invoke.cxx	Fri Sep 06 11:04:00 2013 -0700
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <assert.h>
+#include <jni.h>
+
+#include <pthread.h>
+
+JavaVM* jvm;
+
+void *
+floobydust(void *p) {
+  JNIEnv *env;
+
+  jvm->AttachCurrentThread((void**)&env, NULL);
+
+  jclass class_id = env->FindClass("DoOverflow");
+  assert(class_id);
+
+  jmethodID method_id = env->GetStaticMethodID(class_id, "printIt", "()V");
+  assert(method_id);
+
+  // printIt takes no arguments, so none are passed here.
+  env->CallStaticVoidMethod(class_id, method_id);
+
+  jvm->DetachCurrentThread();
+
+  // The function is declared to return void*; return NULL explicitly.
+  return NULL;
+}
+
+int
+main (int argc, const char** argv) {
+  JavaVMOption options[1];
+  options[0].optionString = (char*) "-Xss320k";
+
+  JavaVMInitArgs vm_args;
+  vm_args.version = JNI_VERSION_1_2;
+  vm_args.ignoreUnrecognized = JNI_TRUE;
+  vm_args.options = options;
+  vm_args.nOptions = 1;
+
+  JNIEnv* env;
+  jint result = JNI_CreateJavaVM(&jvm, (void**)&env, &vm_args);
+  assert(result >= 0);
+
+  pthread_t thr;
+  pthread_create(&thr, NULL, floobydust, NULL);
+  pthread_join(thr, NULL);
+
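+  // Repeat the overflow on the initial (primordial) thread, whose stack and
+  // guard pages are set up differently from pthread-created threads.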
+  floobydust(NULL);
+
+  return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/InitialThreadOverflow/testme.sh	Fri Sep 06 11:04:00 2013 -0700
@@ -0,0 +1,73 @@
+#!/bin/sh
+
+# Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+
+# @test testme.sh
+# @bug 8009062
+# @summary Poor performance of JNI AttachCurrentThread after fix for 7017193
+# @compile DoOverflow.java
+# @run shell testme.sh
+
+set -x
+if [ "${TESTSRC}" = "" ]
+then
+  TESTSRC=${PWD}
+  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
+fi
+echo "TESTSRC=${TESTSRC}"
+## Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../../test_env.sh
+
+if [ "${VM_OS}" != "linux" ]
+then
+  echo "Test only valid for Linux"
+  exit 0
+fi
+
+gcc_cmd=`which gcc`
+if [ "x$gcc_cmd" == "x" ]; then
+    echo "WARNING: gcc not found. Cannot execute test." 2>&1
+    exit 0;
+fi
+
+CFLAGS="-m${VM_BITS}"
+
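+# Make libjvm.so resolvable at run time for the dynamically linked invoke binary.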
+LD_LIBRARY_PATH=.:${COMPILEJAVA}/jre/lib/${VM_CPU}/${VM_TYPE}:/usr/lib:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH
+
+cp ${TESTSRC}${FS}invoke.cxx .
+
+# Copy the result of our @compile action:
+cp ${TESTCLASSES}${FS}DoOverflow.class .
+
+echo "Compilation flag: ${COMP_FLAG}"
+# Note pthread may not be found thus invoke creation will fail to be created.
+# Check to ensure you have a /usr/lib/libpthread.so if you don't please look
+# for /usr/lib/`uname -m`-linux-gnu version ensure to add that path to below compilation.
+
+$gcc_cmd -DLINUX ${CFLAGS} -o invoke \
+    -I${COMPILEJAVA}/include -I${COMPILEJAVA}/include/linux \
+    -L${COMPILEJAVA}/jre/lib/${VM_CPU}/${VM_TYPE} \
+    -ljvm -lpthread invoke.cxx
+
+./invoke
+exit $?
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/LoadClass/LoadClassNegative.java	Fri Sep 06 11:04:00 2013 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key regression
+ * @bug 8020675
+ * @summary make sure there is no fatal error if a class is loaded from an invalid jar file which is in the bootclasspath
+ * @library /testlibrary
+ * @build TestForName
+ * @build LoadClassNegative
+ * @run main LoadClassNegative
+ */
+
+import java.io.File;
+import com.oracle.java.testlibrary.*;
+
+public class LoadClassNegative {
+
+  public static void main(String args[]) throws Exception {
+    String bootCP = "-Xbootclasspath/a:" + System.getProperty("test.src")
+                       + File.separator + "dummy.jar";
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        bootCP,
+        "TestForName");
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldContain("ClassNotFoundException");
+    output.shouldHaveExitValue(0);
+  }
+}
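The test appends dummy.jar from the test source directory to the boot class path; the file only needs to exist while not being a readable jar. A minimal sketch of a helper that could produce such a file (hypothetical; MakeDummyJar is not part of this changeset):

    import java.io.FileOutputStream;
    import java.io.IOException;

    public class MakeDummyJar {
        public static void main(String[] args) throws IOException {
            // The file exists but is deliberately not a valid zip/jar archive.
            try (FileOutputStream out = new FileOutputStream("dummy.jar")) {
                out.write("this is not a zip archive".getBytes());
            }
        }
    }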
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/LoadClass/TestForName.java	Fri Sep 06 11:04:00 2013 -0700
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class TestForName {
+    public static void main(String[] args) {
+        try {
+            Class<?> cls = Class.forName("xxx");
+            System.out.println("Class = " + cls.getName());
+        } catch (ClassNotFoundException cnfe) {
+            cnfe.printStackTrace();
+        }
+    }
+}
--- a/hotspot/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Thu Sep 05 02:45:38 2013 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Fri Sep 06 11:04:00 2013 -0700
@@ -84,7 +84,7 @@
             // there is a chance such reservation will fail
             // If it does, it is NOT considered a failure of the feature,
             // rather a possible expected outcome, though not likely
-            output.shouldContain("Could not allocate metaspace at a compatible address");
+            output.shouldContain("Unable to use shared archive");
             output.shouldHaveExitValue(1);
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/contended/Options.java	Fri Sep 06 11:04:00 2013 -0700
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test
+ * @bug     8006997
+ * @summary ContendedPaddingWidth should be range-checked
+ *
+ * @library /testlibrary
+ * @run main Options
+ */
+public class Options {
+
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb;
+        OutputAnalyzer output;
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-128", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-8", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-1", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldContain("must be a multiple of 8");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=0", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=1", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be a multiple of 8");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8184", "-version"); // 8192-8 = 8184
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8191", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be a multiple of 8");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8192", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8193", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldContain("must be a multiple of 8");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8200", "-version"); // 8192+8 = 8200
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldHaveExitValue(1);
+
+    }
+
+}
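The launch-and-check pattern above repeats for each padding width; a sketch of helpers that could consolidate it, built on the same testlibrary calls the test already uses (the class and helper names are assumptions, not part of this change):

    import com.oracle.java.testlibrary.*;

    public class PaddingChecks {
        // Start a VM with the given -XX:ContendedPaddingWidth value and
        // assert that it is rejected with the expected diagnostics.
        static void shouldFail(String width, String... fragments) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:ContendedPaddingWidth=" + width, "-version");
            OutputAnalyzer output = new OutputAnalyzer(pb.start());
            for (String fragment : fragments) {
                output.shouldContain(fragment);
            }
            output.shouldHaveExitValue(1);
        }

        // Start a VM with the given value and assert that it is accepted.
        static void shouldPass(String width) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:ContendedPaddingWidth=" + width, "-version");
            OutputAnalyzer output = new OutputAnalyzer(pb.start());
            output.shouldHaveExitValue(0);
        }
    }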