changeset: 19293:33c42f8baa1d
parent:    19264:3418e5ef9d9e
parent:    19292:4ea37162ea08
child:     19294:78f23ea4586e
author:    amurillo
date:      Fri, 16 Aug 2013 04:14:12 -0700
summary:   Merge
files:
hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad
hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad
hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.ad
hotspot/src/os_cpu/linux_x86/vm/linux_x86_64.ad
hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.ad
hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad
hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad
hotspot/src/os_cpu/windows_x86/vm/windows_x86_32.ad
hotspot/src/os_cpu/windows_x86/vm/windows_x86_64.ad
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Fri Aug 16 04:14:12 2013 -0700
@@ -44,7 +44,7 @@
     Type type      = db.lookupType("PhaseCFG");
     numBlocksField = new CIntField(type.getCIntegerField("_num_blocks"), 0);
     blocksField = type.getAddressField("_blocks");
-    bbsField = type.getAddressField("_bbs");
+    bbsField = type.getAddressField("_node_to_block_mapping");
     brootField = type.getAddressField("_broot");
   }
 
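
The hunk above tracks a C2 rename: the PhaseCFG field `_bbs` became `_node_to_block_mapping`, and the serviceability agent resolves VM fields by name at runtime, so the Java mirror must follow or the lookup fails the next time the agent attaches. A toy C++ sketch of that failure mode (the offset table below is invented; this is not SA code):

    #include <cstdio>
    #include <map>
    #include <stdexcept>
    #include <string>

    // Toy stand-in for the SA's type database: field name -> offset.
    static const std::map<std::string, size_t> phase_cfg_fields = {
        {"_num_blocks", 0},
        {"_node_to_block_mapping", 8},   // formerly "_bbs"
        {"_broot", 16},
    };

    size_t lookup_field(const std::string& name) {
        auto it = phase_cfg_fields.find(name);
        if (it == phase_cfg_fields.end())
            throw std::runtime_error("no field named " + name);
        return it->second;
    }

    int main() {
        std::printf("%zu\n", lookup_field("_node_to_block_mapping"));  // ok: 8
        try {
            lookup_field("_bbs");            // stale name from before the rename
        } catch (const std::runtime_error& e) {
            std::printf("%s\n", e.what());   // "no field named _bbs"
        }
    }
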
--- a/hotspot/make/bsd/makefiles/adlc.make	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/bsd/makefiles/adlc.make	Fri Aug 16 04:14:12 2013 -0700
@@ -41,13 +41,11 @@
 
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
   SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
 else
   SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
 endif
 
 EXEC	= $(OUTDIR)/adlc
--- a/hotspot/make/hotspot_version	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/hotspot_version	Fri Aug 16 04:14:12 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=45
+HS_BUILD_NUMBER=46
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/linux/makefiles/adlc.make	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/linux/makefiles/adlc.make	Fri Aug 16 04:14:12 2013 -0700
@@ -41,13 +41,11 @@
 
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
   SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
 else
   SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
 endif
 
 EXEC	= $(OUTDIR)/adlc
--- a/hotspot/make/solaris/makefiles/adlc.make	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/solaris/makefiles/adlc.make	Fri Aug 16 04:14:12 2013 -0700
@@ -42,13 +42,11 @@
 
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
   SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
 else
   SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
 endif
 
 EXEC	= $(OUTDIR)/adlc
--- a/hotspot/make/solaris/makefiles/dtrace.make	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/solaris/makefiles/dtrace.make	Fri Aug 16 04:14:12 2013 -0700
@@ -283,9 +283,9 @@
 	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
      $(DTraced_Files) ||\
   STATUS=$$?;\
-	if [ x"$$STATUS" = x"1" -a \
-       x`uname -r` = x"5.10" -a \
-       x`uname -p` = x"sparc" ]; then\
+  if [ x"$$STATUS" = x"1" ]; then \
+      if [ x`uname -r` = x"5.10" -a \
+           x`uname -p` = x"sparc" ]; then\
     echo "*****************************************************************";\
     echo "* If you are building server compiler, and the error message is ";\
     echo "* \"incorrect ELF machine type...\", you have run into solaris bug ";\
@@ -294,6 +294,20 @@
     echo "* environment variable HOTSPOT_DISABLE_DTRACE_PROBES to disable ";\
     echo "* dtrace probes for this build.";\
     echo "*****************************************************************";\
+      elif [ x`uname -r` = x"5.10" ]; then\
+    echo "*****************************************************************";\
+    echo "* If you are seeing 'syntax error near \"umpiconninfo_t\"' on Solaris";\
+    echo "* 10, try doing 'cd /usr/lib/dtrace && gzip mpi.d' as root, ";\
+    echo "* or set the environment variable HOTSPOT_DISABLE_DTRACE_PROBES";\
+    echo "* to disable dtrace probes for this build.";\
+    echo "*****************************************************************";\
+      else \
+    echo "*****************************************************************";\
+    echo "* If you cannot fix dtrace build issues, try to ";\
+    echo "* set the environment variable HOTSPOT_DISABLE_DTRACE_PROBES";\
+    echo "* to disable dtrace probes for this build.";\
+    echo "*****************************************************************";\
+      fi; \
   fi;\
   exit $$STATUS
   # Since some DTraced_Files are in LIBJVM.o and they are touched by this
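
The dtrace.make change splits one flat test into a nested triage: only when dtrace exits with status 1 does the script branch on `uname -r` and `uname -p`, printing the most specific hint first (the Solaris 10/SPARC ELF-machine-type bug), then the Solaris 10 `mpi.d` workaround, then a generic fallback; every branch points at HOTSPOT_DISABLE_DTRACE_PROBES. A compact C++ model of the new decision tree (illustrative only, not build logic):

    #include <cstdio>
    #include <string>

    // Mirrors the nested shell conditionals: most specific match first.
    const char* dtrace_hint(int status, const std::string& release,
                            const std::string& arch) {
        if (status != 1) return nullptr;          // not the failure we triage
        if (release == "5.10" && arch == "sparc")
            return "possible Solaris ELF machine type bug";
        if (release == "5.10")
            return "try 'cd /usr/lib/dtrace && gzip mpi.d' as root";
        return "set HOTSPOT_DISABLE_DTRACE_PROBES to skip dtrace probes";
    }

    int main() {
        std::puts(dtrace_hint(1, "5.10", "i386"));  // -> the mpi.d hint
    }
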
--- a/hotspot/make/windows/create.bat	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/windows/create.bat	Fri Aug 16 04:14:12 2013 -0700
@@ -1,6 +1,6 @@
 @echo off
 REM
-REM Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+REM Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 REM
 REM This code is free software; you can redistribute it and/or modify it
@@ -148,7 +148,7 @@
 
 REM This is now safe to do.
 :copyfiles
-for /D %%i in (compiler1, compiler2, tiered, core) do (
+for /D %%i in (compiler1, compiler2, tiered ) do (
 if NOT EXIST %HotSpotBuildSpace%\%%i\generated mkdir %HotSpotBuildSpace%\%%i\generated
 copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\generated > NUL
 )
@@ -156,7 +156,7 @@
 REM force regneration of ProjectFile
 if exist %ProjectFile% del %ProjectFile%
 
-for /D %%i in (compiler1, compiler2, tiered, core) do (
+for /D %%i in (compiler1, compiler2, tiered ) do (
 echo -- %%i --
 echo # Generated file!                                                        >    %HotSpotBuildSpace%\%%i\local.make
 echo # Changing a variable below and then deleting %ProjectFile% will cause  >>    %HotSpotBuildSpace%\%%i\local.make
--- a/hotspot/make/windows/create_obj_files.sh	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/windows/create_obj_files.sh	Fri Aug 16 04:14:12 2013 -0700
@@ -73,19 +73,17 @@
 
 BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
 
-if [ -d "${ALTSRC}/share/vm/jfr" ]; then
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
+if [ -d "${ALTSRC}/share/vm/jfr/buffers" ]; then
   BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
 fi
 
 BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
 
-CORE_PATHS="${BASE_PATHS}"
 # shared is already in BASE_PATHS. Should add vm/memory but that one is also in BASE_PATHS.
 if [ -d "${ALTSRC}/share/vm/gc_implementation" ]; then
-  CORE_PATHS="${CORE_PATHS} `$FIND ${ALTSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
+  BASE_PATHS="${BASE_PATHS} `$FIND ${ALTSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
 fi
-CORE_PATHS="${CORE_PATHS} `$FIND ${COMMONSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
+BASE_PATHS="${BASE_PATHS} `$FIND ${COMMONSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
 
 if [ -d "${ALTSRC}/share/vm/c1" ]; then
   COMPILER1_PATHS="${ALTSRC}/share/vm/c1"
@@ -104,12 +102,11 @@
 
 # Include dirs per type.
 case "${TYPE}" in
-    "core")      Src_Dirs="${CORE_PATHS}" ;;
-    "compiler1") Src_Dirs="${CORE_PATHS} ${COMPILER1_PATHS}" ;;
-    "compiler2") Src_Dirs="${CORE_PATHS} ${COMPILER2_PATHS}" ;;
-    "tiered")    Src_Dirs="${CORE_PATHS} ${COMPILER1_PATHS} ${COMPILER2_PATHS}" ;;
-    "zero")      Src_Dirs="${CORE_PATHS}" ;;
-    "shark")     Src_Dirs="${CORE_PATHS}" ;;
+    "compiler1") Src_Dirs="${BASE_PATHS} ${COMPILER1_PATHS}" ;;
+    "compiler2") Src_Dirs="${BASE_PATHS} ${COMPILER2_PATHS}" ;;
+    "tiered")    Src_Dirs="${BASE_PATHS} ${COMPILER1_PATHS} ${COMPILER2_PATHS}" ;;
+    "zero")      Src_Dirs="${BASE_PATHS}" ;;
+    "shark")     Src_Dirs="${BASE_PATHS}" ;;
 esac
 
 COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*"
@@ -122,7 +119,6 @@
 
 # Exclude per type.
 case "${TYPE}" in
-    "core")      Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
     "compiler1") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
     "compiler2") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
     "tiered")    Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
@@ -149,9 +145,17 @@
    Src_Files="${Src_Files}`findsrc ${e}` "
 done 
 
-Obj_Files=
+Obj_Files=" "
 for e in ${Src_Files}; do
-	Obj_Files="${Obj_Files}${e%\.[!.]*}.obj "
+        o="${e%\.[!.]*}.obj"
+        set +e
+        chk=`expr "${Obj_Files}" : ".* $o"`
+        set -e
+        if [ "$chk" != 0 ]; then
+             echo "# INFO: skipping duplicate $o"
+             continue
+        fi
+	Obj_Files="${Obj_Files}$o "
 done
 
 echo Obj_Files=${Obj_Files}
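
Because the core variant is gone, directories that used to be appended only to CORE_PATHS now land in BASE_PATHS, so the same source file can reach the object list twice; the new loop therefore skips any .obj it has already recorded (the shell does this with an `expr` substring match against the accumulated list). The same idea as a standalone C++ sketch using a set:

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        std::vector<std::string> srcs = {"os.cpp", "vm.cpp", "os.cpp"}; // sample
        std::set<std::string> seen;
        for (const auto& s : srcs) {
            std::string obj = s.substr(0, s.rfind('.')) + ".obj";
            if (!seen.insert(obj).second) {          // already emitted once
                std::cout << "# INFO: skipping duplicate " << obj << '\n';
                continue;
            }
            std::cout << obj << ' ';
        }
        std::cout << '\n';
    }
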
--- a/hotspot/make/windows/makefiles/adlc.make	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/windows/makefiles/adlc.make	Fri Aug 16 04:14:12 2013 -0700
@@ -55,13 +55,11 @@
 
 !if "$(Platform_arch_model)" == "$(Platform_arch)"
 SOURCES_AD=\
-  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
-  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
+  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad 
 !else
 SOURCES_AD=\
   $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
-  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad \
-  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
+  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad 
 !endif
 
 # NOTE! If you add any files here, you must also update GENERATED_NAMES_IN_DIR
--- a/hotspot/make/windows/makefiles/projectcreator.make	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/windows/makefiles/projectcreator.make	Fri Aug 16 04:14:12 2013 -0700
@@ -44,10 +44,11 @@
 
 # This is only used internally
 ProjectCreatorIncludesPRIVATE=\
-        -relativeInclude src\closed\share\vm \
-        -relativeInclude src\closed\os\windows\vm \
-        -relativeInclude src\closed\os_cpu\windows_$(Platform_arch)\vm \
-        -relativeInclude src\closed\cpu\$(Platform_arch)\vm \
+        -relativeAltSrcInclude src\closed \
+        -altRelativeInclude share\vm \
+        -altRelativeInclude os\windows\vm \
+        -altRelativeInclude os_cpu\windows_$(Platform_arch)\vm \
+        -altRelativeInclude cpu\$(Platform_arch)\vm \
         -relativeInclude src\share\vm \
         -relativeInclude src\share\vm\precompiled \
         -relativeInclude src\share\vm\prims\wbtestmethods \
@@ -91,7 +92,7 @@
         -disablePch        getThread_windows_$(Platform_arch).cpp \
         -disablePch_compiler2     opcodes.cpp
 
-# Common options for the IDE builds for core, c1, and c2
+# Common options for the IDE builds for c1, and c2
 ProjectCreatorIDEOptions=\
         $(ProjectCreatorIDEOptions) \
         -sourceBase $(HOTSPOTWORKSPACE) \
@@ -158,18 +159,10 @@
  -ignoreFile_TARGET $(Platform_arch_model).ad
 
 ##################################################
-# Without compiler(core) specific options
-##################################################
-ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
-$(ProjectCreatorIDEOptionsIgnoreCompiler1:TARGET=core) \
-$(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=core)
-
-##################################################
 # Client(C1) compiler specific options
 ##################################################
 ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
  -define_compiler1 COMPILER1 \
- -ignorePath_compiler1 core \
 $(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=compiler1)
 
 ##################################################
@@ -178,7 +171,6 @@
 #NOTE! This list must be kept in sync with GENERATED_NAMES in adlc.make.
 ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
  -define_compiler2 COMPILER2 \
- -ignorePath_compiler2 core \
  -additionalFile_compiler2 $(Platform_arch_model).ad \
  -additionalFile_compiler2 ad_$(Platform_arch_model).cpp \
  -additionalFile_compiler2 ad_$(Platform_arch_model).hpp \
--- a/hotspot/make/windows/makefiles/trace.make	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/windows/makefiles/trace.make	Fri Aug 16 04:14:12 2013 -0700
@@ -90,25 +90,25 @@
 !if "$(OPENJDK)" == "true"
 
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating OpenJDK $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
 
 !else
 
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
 
 $(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
 
 $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
 
 $(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
 
 !endif
--- a/hotspot/make/windows/makefiles/vm.make	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/windows/makefiles/vm.make	Fri Aug 16 04:14:12 2013 -0700
@@ -36,10 +36,6 @@
 CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
 !endif
 
-!if "$(Variant)" == "core"
-# No need to define anything, CORE is defined as !COMPILER1 && !COMPILER2
-!endif
-
 !if "$(Variant)" == "compiler1"
 CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1"
 !endif
--- a/hotspot/make/windows/projectfiles/common/Makefile	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/make/windows/projectfiles/common/Makefile	Fri Aug 16 04:14:12 2013 -0700
@@ -112,6 +112,7 @@
 ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
 
 $(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
+	@if "$(MSC_VER)"=="1500" echo Make sure you have VS2008 SP1 or later, or you may see 'expanded command line too long'
 	@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
 
 clean:
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1887,6 +1887,27 @@
   if (ProfileInterpreter) {
     __ set_method_data_pointer_for_bcp();
   }
+
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+
+    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
+
+    __ br_null(G1_scratch, false, Assembler::pn, L_done);
+    __ delayed()->nop();
+
+    __ st_ptr(G1_scratch, Lesp, wordSize);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
   // Resume bytecode interpretation at the current bcp
   __ dispatch_next(vtos);
   // end of JVMTI PopFrame support
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1920,6 +1920,29 @@
   __ get_thread(thread);
   __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
 
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+    const Register local0 = rdi;
+
+    __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
+    __ jcc(Assembler::notEqual, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ get_method(rdx);
+    __ movptr(rax, Address(local0, 0));
+    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, L_done);
+
+    __ movptr(Address(rbx, 0), rax);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
   __ dispatch_next(vtos);
   // end of PopFrame support
 
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1929,6 +1929,29 @@
   __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
           JavaThread::popframe_inactive);
 
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+    const Register local0 = r14;
+
+    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
+    __ jcc(Assembler::notEqual, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ get_method(rdx);
+    __ movptr(rax, Address(local0, 0));
+    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, L_done);
+
+    __ movptr(Address(rbx, 0), rax);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
   __ dispatch_next(vtos);
   // end of PopFrame support
 
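
The three interpreter hunks above (SPARC, x86_32, x86_64) install the same JVMTI PopFrame fix: when the bytecode about to be re-executed is `_invokestatic`, the stub calls `InterpreterRuntime::member_name_arg_or_null`, which returns the member-name argument to restore or NULL, and a non-NULL result is written back into the expression-stack slot before dispatch resumes. A hedged C++ paraphrase of the control flow the assembly implements (the runtime helper below is a stub, not VM code):

    #include <cstdint>
    #include <cstdio>

    enum : uint8_t { op_invokestatic = 0xb8 };   // JVM opcode for invokestatic

    // Toy stand-in for InterpreterRuntime::member_name_arg_or_null: returns
    // the saved MemberName when the call site needs one restored, else null.
    static void* member_name_arg_or_null(uint8_t* /*bcp*/) {
        static int fake_member_name;
        return &fake_member_name;
    }

    // Paraphrase of the assembly: check the resumed bytecode, ask the
    // runtime, and rewrite the stack slot only on a non-null answer.
    static void popframe_restore(uint8_t* bcp, void** stack_slot) {
        if (*bcp != op_invokestatic) return;
        if (void* m = member_name_arg_or_null(bcp))
            *stack_slot = m;
    }

    int main() {
        uint8_t code = op_invokestatic;
        void* slot = nullptr;
        popframe_restore(&code, &slot);
        std::printf("slot restored: %s\n", slot ? "yes" : "no");
    }
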
--- a/hotspot/src/cpu/zero/vm/entryFrame_zero.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/cpu/zero/vm/entryFrame_zero.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -58,8 +58,8 @@
                            JavaCallWrapper* call_wrapper,
                            TRAPS);
  public:
-  JavaCallWrapper *call_wrapper() const {
-    return (JavaCallWrapper *) value_of_word(call_wrapper_off);
+  JavaCallWrapper **call_wrapper() const {
+    return (JavaCallWrapper **) addr_of_word(call_wrapper_off);
   }
 
  public:
--- a/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -141,7 +141,7 @@
   return fp();
 }
 
-inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {
+inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
   return zero_entryframe()->call_wrapper();
 }
 
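
On Zero, `entry_frame_call_wrapper()` becomes `entry_frame_call_wrapper_addr()`: the accessor now hands back the address of the word holding the JavaCallWrapper* (via `addr_of_word`) rather than its value, so callers can update or clear the slot in place. A minimal illustration of the value-of versus address-of accessor styles (frame layout invented for the example):

    #include <cstdint>
    #include <cstdio>

    struct Wrapper { int id; };

    struct ToyFrame {
        intptr_t words[4];                      // invented word-indexed frame
        static const int wrapper_off = 2;

        Wrapper* call_wrapper_value() const {   // old style: returns the value
            return (Wrapper*)words[wrapper_off];
        }
        Wrapper** call_wrapper_addr() {         // new style: returns the slot
            return (Wrapper**)&words[wrapper_off];
        }
    };

    int main() {
        Wrapper w{42};
        ToyFrame f{};
        f.words[ToyFrame::wrapper_off] = (intptr_t)&w;
        *f.call_wrapper_addr() = nullptr;       // possible only with the address
        std::printf("%p\n", (void*)f.call_wrapper_value());
    }
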
--- a/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -176,6 +176,19 @@
       StubRoutines::_oop_arraycopy;
   }
 
+  static int SafeFetch32(int *adr, int errValue) {
+    int value = errValue;
+    value = *adr;
+    return value;
+  }
+
+  static intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
+    intptr_t value = errValue;
+    value = *adr;
+    return value;
+  }
+
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -225,6 +238,15 @@
 
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
+
+    // Safefetch stubs.
+    StubRoutines::_safefetch32_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetch32);
+    StubRoutines::_safefetch32_fault_pc = NULL;
+    StubRoutines::_safefetch32_continuation_pc = NULL;
+
+    StubRoutines::_safefetchN_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetchN);
+    StubRoutines::_safefetchN_fault_pc = NULL;
+    StubRoutines::_safefetchN_continuation_pc = NULL;
   }
 
  public:
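
SafeFetch32/SafeFetchN move into Zero's StubGenerator and are registered through the shared StubRoutines entry points; the fault and continuation PCs stay NULL because these plain C++ bodies, unlike the hand-written assembly stubs on other ports, cannot resume after a faulting read (the old copies in os_linux_zero.cpp are deleted further down in this changeset). A toy model of the SafeFetch contract, mirroring the stub's shape:

    #include <cstdio>

    // SafeFetch contract: read *adr; if the read would fault, a real
    // implementation returns errValue instead. This toy version only shows
    // the shape -- like Zero's C++ stub, it cannot intercept the fault.
    static int SafeFetch32(int* adr, int errValue) {
        int value = errValue;   // pre-load the error value
        value = *adr;           // a faulting read would be caught here
        return value;
    }

    int main() {
        int x = 7;
        std::printf("%d\n", SafeFetch32(&x, -1));  // prints 7
    }
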
--- a/hotspot/src/os/bsd/vm/attachListener_bsd.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/os/bsd/vm/attachListener_bsd.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -445,14 +445,14 @@
 
 void AttachListener::vm_start() {
   char fn[UNIX_PATH_MAX];
-  struct stat64 st;
+  struct stat st;
   int ret;
 
   int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
            os::get_temp_directory(), os::current_process_id());
   assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
 
-  RESTARTABLE(::stat64(fn, &st), ret);
+  RESTARTABLE(::stat(fn, &st), ret);
   if (ret == 0) {
     ret = ::unlink(fn);
     if (ret == -1) {
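
`stat64` is deprecated or absent on the BSDs (on OS X, plain `stat` is already 64-bit clean), hence the switch above. RESTARTABLE is HotSpot's retry-on-EINTR wrapper around interruptible syscalls; a simplified sketch of the idiom (the real macro differs in detail):

    #include <cerrno>
    #include <cstdio>
    #include <sys/stat.h>

    // Simplified version of the RESTARTABLE idiom: retry on EINTR.
    #define RESTARTABLE(cmd, result)                \
        do {                                        \
            (result) = (cmd);                       \
        } while ((result) == -1 && errno == EINTR)

    int main() {
        struct stat st;   // plain stat: 64-bit off_t on modern BSDs
        int ret;
        RESTARTABLE(::stat("/tmp", &st), ret);
        std::printf("stat ret=%d\n", ret);
    }
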
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1642,6 +1642,8 @@
 
 void os::win32::print_windows_version(outputStream* st) {
   OSVERSIONINFOEX osvi;
+  SYSTEM_INFO si;
+
   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
 
@@ -1651,6 +1653,18 @@
   }
 
   int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
+
+  ZeroMemory(&si, sizeof(SYSTEM_INFO));
+  if (os_vers >= 5002) {
+    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
+    // find out whether we are running on 64 bit processor or not.
+    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
+      os::Kernel32Dll::GetNativeSystemInfo(&si);
+    } else {
+      GetSystemInfo(&si);
+    }
+  }
+
   if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
     switch (os_vers) {
     case 3051: st->print(" Windows NT 3.51"); break;
@@ -1658,57 +1672,48 @@
     case 5000: st->print(" Windows 2000"); break;
     case 5001: st->print(" Windows XP"); break;
     case 5002:
-    case 6000:
-    case 6001:
-    case 6002: {
-      // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
-      // find out whether we are running on 64 bit processor or not.
-      SYSTEM_INFO si;
-      ZeroMemory(&si, sizeof(SYSTEM_INFO));
-        if (!os::Kernel32Dll::GetNativeSystemInfoAvailable()){
-          GetSystemInfo(&si);
+      if (osvi.wProductType == VER_NT_WORKSTATION &&
+          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+        st->print(" Windows XP x64 Edition");
       } else {
-        os::Kernel32Dll::GetNativeSystemInfo(&si);
+        st->print(" Windows Server 2003 family");
       }
-      if (os_vers == 5002) {
-        if (osvi.wProductType == VER_NT_WORKSTATION &&
-            si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-          st->print(" Windows XP x64 Edition");
-        else
-            st->print(" Windows Server 2003 family");
-      } else if (os_vers == 6000) {
-        if (osvi.wProductType == VER_NT_WORKSTATION)
-            st->print(" Windows Vista");
-        else
-            st->print(" Windows Server 2008");
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-            st->print(" , 64 bit");
-      } else if (os_vers == 6001) {
-        if (osvi.wProductType == VER_NT_WORKSTATION) {
-            st->print(" Windows 7");
-        } else {
-            // Unrecognized windows, print out its major and minor versions
-            st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
-        }
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-            st->print(" , 64 bit");
-      } else if (os_vers == 6002) {
-        if (osvi.wProductType == VER_NT_WORKSTATION) {
-            st->print(" Windows 8");
-        } else {
-            st->print(" Windows Server 2012");
-        }
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-            st->print(" , 64 bit");
-      } else { // future os
-        // Unrecognized windows, print out its major and minor versions
-        st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-            st->print(" , 64 bit");
+      break;
+
+    case 6000:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows Vista");
+      } else {
+        st->print(" Windows Server 2008");
       }
       break;
-    }
-    default: // future windows, print out its major and minor versions
+
+    case 6001:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 7");
+      } else {
+        st->print(" Windows Server 2008 R2");
+      }
+      break;
+
+    case 6002:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 8");
+      } else {
+        st->print(" Windows Server 2012");
+      }
+      break;
+
+    case 6003:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 8.1");
+      } else {
+        st->print(" Windows Server 2012 R2");
+      }
+      break;
+
+    default: // future os
+      // Unrecognized windows, print out its major and minor versions
       st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
     }
   } else {
@@ -1720,6 +1725,11 @@
       st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
     }
   }
+
+  if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+    st->print(" , 64 bit");
+  }
+
   st->print(" Build %d", osvi.dwBuildNumber);
   st->print(" %s", osvi.szCSDVersion);           // service pack
   st->cr();
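
The os_windows.cpp rewrite hoists the SYSTEM_INFO query ahead of the version switch (preferring GetNativeSystemInfo so a 32-bit VM under WOW64 still sees the real processor), flattens the nested 5002/6000/6001/6002 handling into plain cases, corrects the 6001 server branch to print "Windows Server 2008 R2" instead of the generic NT fallback, adds 6003 (Windows 8.1 / Server 2012 R2), and emits the ", 64 bit" suffix once for any 6000+ AMD64 system. Stripped of the output stream, the mapping is a pure function, roughly (a sketch, not the VM code):

    #include <cstdio>

    // (major*1000 + minor, workstation?) -> marketing name; sketch of the
    // table the new switch implements. The real 5002 case also checks for
    // PROCESSOR_ARCHITECTURE_AMD64 before choosing "XP x64 Edition".
    const char* windows_name(int os_vers, bool workstation) {
        switch (os_vers) {
        case 5001: return "Windows XP";
        case 5002: return workstation ? "Windows XP x64 Edition"
                                      : "Windows Server 2003 family";
        case 6000: return workstation ? "Windows Vista"
                                      : "Windows Server 2008";
        case 6001: return workstation ? "Windows 7"
                                      : "Windows Server 2008 R2";
        case 6002: return workstation ? "Windows 8"
                                      : "Windows Server 2012";
        case 6003: return workstation ? "Windows 8.1"
                                      : "Windows Server 2012 R2";
        default:   return "Windows NT (unrecognized)";
        }
    }

    int main() {
        std::printf("%s\n", windows_name(6003, false)); // Windows Server 2012 R2
    }
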
--- a/hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-//
-// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// X86 Bsd Architecture Description File
-
--- a/hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-//
-// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// AMD64 Bsd Architecture Description File
-
-//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
-// This block specifies the encoding classes used by the compiler to
-// output byte streams.  Encoding classes generate functions which are
-// called by Machine Instruction Nodes in order to generate the bit
-// encoding of the instruction.  Operands specify their base encoding
-// interface with the interface keyword.  There are currently
-// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
-// COND_INTER.  REG_INTER causes an operand to generate a function
-// which returns its register number when queried.  CONST_INTER causes
-// an operand to generate a function which returns the value of the
-// constant when queried.  MEMORY_INTER causes an operand to generate
-// four functions which return the Base Register, the Index Register,
-// the Scale Value, and the Offset Value of the operand when queried.
-// COND_INTER causes an operand to generate six functions which return
-// the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional
-// instruction.  Instructions specify two basic values for encoding.
-// They use the ins_encode keyword to specify their encoding class
-// (which must be one of the class names specified in the encoding
-// block), and they use the opcode keyword to specify, in order, their
-// primary, secondary, and tertiary opcode.  Only the opcode sections
-// which a particular instruction needs for encoding need to be
-// specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-%}
-
-
-// Platform dependent source
-
-source %{
-
-%}
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-//
-// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// X86 Linux Architecture Description File
-
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-//
-// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// AMD64 Linux Architecture Description File
-
-//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
-// This block specifies the encoding classes used by the compiler to
-// output byte streams.  Encoding classes generate functions which are
-// called by Machine Instruction Nodes in order to generate the bit
-// encoding of the instruction.  Operands specify their base encoding
-// interface with the interface keyword.  There are currently
-// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
-// COND_INTER.  REG_INTER causes an operand to generate a function
-// which returns its register number when queried.  CONST_INTER causes
-// an operand to generate a function which returns the value of the
-// constant when queried.  MEMORY_INTER causes an operand to generate
-// four functions which return the Base Register, the Index Register,
-// the Scale Value, and the Offset Value of the operand when queried.
-// COND_INTER causes an operand to generate six functions which return
-// the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional
-// instruction.  Instructions specify two basic values for encoding.
-// They use the ins_encode keyword to specify their encoding class
-// (which must be one of the class names specified in the encoding
-// block), and they use the opcode keyword to specify, in order, their
-// primary, secondary, and tertiary opcode.  Only the opcode sections
-// which a particular instruction needs for encoding need to be
-// specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-%}
-
-
-// Platform dependent source
-
-source %{
-
-%}
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -410,16 +410,6 @@
   int SpinPause() {
   }
 
-  int SafeFetch32(int *adr, int errValue) {
-    int value = errValue;
-    value = *adr;
-    return value;
-  }
-  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
-    intptr_t value = errValue;
-    value = *adr;
-    return value;
-  }
 
   void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
     if (from > to) {
--- a/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-//
-// Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-
-//
-//
-
-// SPARC Solaris Architecture Description File
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-//
-// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// X86 Solaris Architecture Description File
-
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-//
-// Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// AMD64 Solaris Architecture Description File
-
-//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
-// This block specifies the encoding classes used by the compiler to
-// output byte streams.  Encoding classes generate functions which are
-// called by Machine Instruction Nodes in order to generate the bit
-// encoding of the instruction.  Operands specify their base encoding
-// interface with the interface keyword.  There are currently
-// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
-// COND_INTER.  REG_INTER causes an operand to generate a function
-// which returns its register number when queried.  CONST_INTER causes
-// an operand to generate a function which returns the value of the
-// constant when queried.  MEMORY_INTER causes an operand to generate
-// four functions which return the Base Register, the Index Register,
-// the Scale Value, and the Offset Value of the operand when queried.
-// COND_INTER causes an operand to generate six functions which return
-// the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional
-// instruction.  Instructions specify two basic values for encoding.
-// They use the ins_encode keyword to specify their encoding class
-// (which must be one of the class names specified in the encoding
-// block), and they use the opcode keyword to specify, in order, their
-// primary, secondary, and tertiary opcode.  Only the opcode sections
-// which a particular instruction needs for encoding need to be
-// specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-%}
-
-
-// Platform dependent source
-
-source %{
-%}
--- a/hotspot/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-//
-// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// X86 Win32 Architecture Description File
-
--- a/hotspot/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Thu Aug 15 09:25:33 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-//
-// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
-
-// AMD64 Win32 Architecture Description File
-
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-%}
-
-
-// Platform dependent source
-
-source %{
-
-%}
--- a/hotspot/src/share/tools/ProjectCreator/BuildConfig.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/tools/ProjectCreator/BuildConfig.java	Fri Aug 16 04:14:12 2013 -0700
@@ -142,6 +142,69 @@
         return rv;
     }
 
+    // Returns true if the specified path refers to a relative alternate
+    // source file. RelativeAltSrcInclude is usually "src\closed".
+    public static boolean matchesRelativeAltSrcInclude(String path) {
+        String relativeAltSrcInclude =
+            getFieldString(null, "RelativeAltSrcInclude");
+        Vector<String> v = getFieldVector(null, "AltRelativeInclude");
+        for (String pathPart : v) {
+            if (path.contains(relativeAltSrcInclude + Util.sep + pathPart))  {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // Returns the relative alternate source file for the specified path.
+    // Null is returned if the specified path does not have a matching
+    // alternate source file.
+    public static String getMatchingRelativeAltSrcFile(String path) {
+        Vector<String> v = getFieldVector(null, "RelativeAltSrcFileList");
+        if (v == null) {
+            return null;
+        }
+        for (String pathPart : v) {
+            if (path.endsWith(pathPart)) {
+                String relativeAltSrcInclude =
+                    getFieldString(null, "RelativeAltSrcInclude");
+                return relativeAltSrcInclude + Util.sep + pathPart;
+            }
+        }
+        return null;
+    }
+
+    // Returns true if the specified path has a matching alternate
+    // source file.
+    public static boolean matchesRelativeAltSrcFile(String path) {
+        return getMatchingRelativeAltSrcFile(path) != null;
+    }
+
+    // Track the specified alternate source file. The source file is
+    // tracked without the leading .*<sep><RelativeAltSrcFileList><sep>
+    // part to make matching regular source files easier.
+    public static void trackRelativeAltSrcFile(String path) {
+        String pattern = getFieldString(null, "RelativeAltSrcInclude") +
+            Util.sep;
+        int altSrcInd = path.indexOf(pattern);
+        if (altSrcInd == -1) {
+            // not an AltSrc path
+            return;
+        }
+
+        altSrcInd += pattern.length();
+        if (altSrcInd >= path.length()) {
+            // not a valid AltSrc path
+            return;
+        }
+
+        String altSrcFile = path.substring(altSrcInd);
+        Vector v = getFieldVector(null, "RelativeAltSrcFileList");
+        if (v == null || !v.contains(altSrcFile)) {
+            addFieldVector(null, "RelativeAltSrcFileList", altSrcFile);
+        }
+    }
+
     void addTo(Hashtable ht, String key, String value) {
         ht.put(expandFormat(key), expandFormat(value));
     }
@@ -272,8 +335,19 @@
 
     private Vector getSourceIncludes() {
         Vector<String> rv = new Vector<String>();
+        String sourceBase = getFieldString(null, "SourceBase");
+
+        // add relative alternate source include values:
+        String relativeAltSrcInclude =
+            getFieldString(null, "RelativeAltSrcInclude");
+        Vector<String> asri = new Vector<String>();
+        collectRelevantVectors(asri, "AltRelativeInclude");
+        for (String f : asri) {
+            rv.add(sourceBase + Util.sep + relativeAltSrcInclude +
+                   Util.sep + f);
+        }
+
         Vector<String> ri = new Vector<String>();
-        String sourceBase = getFieldString(null, "SourceBase");
         collectRelevantVectors(ri, "RelativeInclude");
         for (String f : ri) {
             rv.add(sourceBase + Util.sep + f);
@@ -541,35 +615,6 @@
     }
 }
 
-class CoreDebugConfig extends GenericDebugNonKernelConfig {
-    String getOptFlag() {
-        return getCI().getNoOptFlag();
-    }
-
-    CoreDebugConfig() {
-        initNames("core", "debug", "jvm.dll");
-        init(getIncludes(), getDefines());
-    }
-}
-
-class CoreFastDebugConfig extends GenericDebugNonKernelConfig {
-    String getOptFlag() {
-        return getCI().getOptFlag();
-    }
-
-    CoreFastDebugConfig() {
-        initNames("core", "fastdebug", "jvm.dll");
-        init(getIncludes(), getDefines());
-    }
-}
-
-class CoreProductConfig extends ProductConfig {
-    CoreProductConfig() {
-        initNames("core", "product", "jvm.dll");
-        init(getIncludes(), getDefines());
-    }
-}
-
 
 abstract class CompilerInterface {
     abstract Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir);
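
The new BuildConfig helpers classify project files against the alternate source root (RelativeAltSrcInclude, usually `src\closed`): a path under `<alt root>\<alt include dir>` is an alternate source file and gets tracked by its trailing relative part, and a regular file whose path ends with a tracked part is reported as replaced and later excluded from the build. The matching is plain substring and suffix work; a C++ sketch with hypothetical inputs:

    #include <iostream>
    #include <string>
    #include <vector>

    const std::string kAltRoot = "src\\closed";       // RelativeAltSrcInclude
    const std::vector<std::string> kAltIncludes = {   // AltRelativeInclude
        "share\\vm", "os\\windows\\vm"};

    bool matches_alt_src_include(const std::string& path) {
        for (const auto& inc : kAltIncludes)
            if (path.find(kAltRoot + "\\" + inc) != std::string::npos)
                return true;
        return false;
    }

    // Tracked files are stored by the part after "src\closed\", so a regular
    // file at the same relative location is found with a simple suffix test.
    bool ends_with(const std::string& s, const std::string& suf) {
        return s.size() >= suf.size() &&
               s.compare(s.size() - suf.size(), suf.size(), suf) == 0;
    }

    int main() {
        std::cout << matches_alt_src_include(
            "C:\\hs\\src\\closed\\share\\vm\\foo.cpp") << '\n';   // 1
        std::cout << ends_with("C:\\hs\\src\\share\\vm\\foo.cpp",
                               "share\\vm\\foo.cpp") << '\n';     // 1: replaced
    }
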
--- a/hotspot/src/share/tools/ProjectCreator/FileTreeCreator.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/tools/ProjectCreator/FileTreeCreator.java	Fri Aug 16 04:14:12 2013 -0700
@@ -12,11 +12,15 @@
    final int startDirLength;
    Stack<DirAttributes> attributes = new Stack<DirAttributes>();
    Vector<BuildConfig> allConfigs;
-   WinGammaPlatformVC10 wg;
+   WinGammaPlatform wg;
+   WinGammaPlatformVC10 wg10;
 
-   public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatformVC10 wg) {
+   public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
       super();
       this.wg = wg;
+      if (wg instanceof WinGammaPlatformVC10) {
+          wg10 = (WinGammaPlatformVC10)wg;
+      }
       this.allConfigs = allConfigs;
       this.startDir = startDir;
       startDirLength = startDir.toAbsolutePath().toString().length();
--- a/hotspot/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java	Fri Aug 16 04:14:12 2013 -0700
@@ -1,3 +1,27 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
 import static java.nio.file.FileVisitResult.CONTINUE;
 
 import java.io.IOException;
@@ -21,6 +45,8 @@
          boolean usePch = false;
          boolean disablePch = false;
          boolean useIgnore = false;
+         boolean isAltSrc = false;  // only needed as a debugging crumb
+         boolean isReplacedByAltSrc = false;
          String fileName = file.getFileName().toString();
 
          // TODO hideFile
@@ -30,6 +56,26 @@
             usePch = true;
          }
 
+         String fileLoc = vcProjLocation.relativize(file).toString();
+
+         // isAltSrc and isReplacedByAltSrc applies to all configs for a file
+         if (BuildConfig.matchesRelativeAltSrcInclude(
+               file.toAbsolutePath().toString())) {
+            // current file is an alternate source file so track it
+            isAltSrc = true;
+            BuildConfig.trackRelativeAltSrcFile(
+                file.toAbsolutePath().toString());
+         } else if (BuildConfig.matchesRelativeAltSrcFile(
+                    file.toAbsolutePath().toString())) {
+            // current file is a regular file that matches an alternate
+            // source file so yack about replacing the regular file
+            isReplacedByAltSrc = true;
+            System.out.println("INFO: alternate source file '" +
+                               BuildConfig.getMatchingRelativeAltSrcFile(
+                                   file.toAbsolutePath().toString()) +
+                               "' replaces '" + fileLoc + "'");
+         }
+
          for (BuildConfig cfg : allConfigs) {
             if (cfg.lookupHashFieldInContext("IgnoreFile", fileName) != null) {
                useIgnore = true;
@@ -57,10 +103,9 @@
             }
          }
 
-         String tagName = wg.getFileTagFromSuffix(fileName);
-         String fileLoc = vcProjLocation.relativize(file).toString();
+         String tagName = wg10.getFileTagFromSuffix(fileName);
 
-         if (!useIgnore && !disablePch && !usePch) {
+         if (!useIgnore && !disablePch && !usePch && !isReplacedByAltSrc) {
             wg.tag(tagName, new String[] { "Include", fileLoc});
          } else {
             wg.startTag(
@@ -78,12 +123,17 @@
                if (disablePch) {
                   wg.tag("PrecompiledHeader", "Condition", "'$(Configuration)|$(Platform)'=='" + cfg.get("Name") + "'");
                }
+               if (isReplacedByAltSrc) {
+                  wg.tagData("ExcludedFromBuild", "true", "Condition",
+                             "'$(Configuration)|$(Platform)'=='" +
+                             cfg.get("Name") + "'");
+               }
             }
             wg.endTag();
          }
 
          String filter = startDir.relativize(file.getParent().toAbsolutePath()).toString();
-         wg.addFilterDependency(fileLoc, filter);
+         wg10.addFilterDependency(fileLoc, filter);
 
          return CONTINUE;
       }
@@ -112,7 +162,7 @@
          if (!hide) {
             String name = startDir.relativize(path.toAbsolutePath()).toString();
             if (!"".equals(name)) {
-               wg.addFilter(name);
+               wg10.addFilter(name);
             }
 
             attributes.push(newAttr);
@@ -137,6 +187,4 @@
       public void writeFileTree() throws IOException {
          Files.walkFileTree(this.startDir, this);
       }
-
-
-   }
\ No newline at end of file
+}
--- a/hotspot/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java	Fri Aug 16 04:14:12 2013 -0700
@@ -12,7 +12,7 @@
 public class FileTreeCreatorVC7 extends FileTreeCreator {
 
       public FileTreeCreatorVC7(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
-         super(startDir, allConfigs, null);
+         super(startDir, allConfigs, wg);
       }
 
       @Override
--- a/hotspot/src/share/tools/ProjectCreator/ProjectCreator.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/tools/ProjectCreator/ProjectCreator.java	Fri Aug 16 04:14:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,15 @@
             + "jvm.dll; no trailing slash>");
       System.err.println("  If any of the above are specified, "
             + "they must all be.");
+      System.err.println("  Note: if '-altRelativeInclude' option below is "
+            + "used, then the '-relativeAltSrcInclude' option must be used "
+            + "to specify the alternate source dir, e.g., 'src\\closed'");
       System.err.println("  Additional, optional arguments, which can be "
             + "specified multiple times:");
       System.err.println("    -absoluteInclude <string containing absolute "
             + "path to include directory>");
+      System.err.println("    -altRelativeInclude <string containing "
+            + "alternate include directory relative to -envVar>");
       System.err.println("    -relativeInclude <string containing include "
             + "directory relative to -envVar>");
       System.err.println("    -define <preprocessor flag to be #defined "
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatform.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatform.java	Fri Aug 16 04:14:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,10 +140,17 @@
                            "already exist>");
         System.err.println("  If any of the above are specified, "+
                            "they must all be.");
+        System.err.println("  Note: if '-altRelativeInclude' option below " +
+                           "is used, then the '-relativeAltSrcInclude' " +
+                           "option must be used to specify the alternate " +
+                           "source dir, e.g., 'src\\closed'");
         System.err.println("  Additional, optional arguments, which can be " +
                            "specified multiple times:");
         System.err.println("    -absoluteInclude <string containing absolute " +
                            "path to include directory>");
+        System.err.println("    -altRelativeInclude <string containing " +
+                           "alternate include directory relative to " +
+                           "-sourceBase>");
         System.err.println("    -relativeInclude <string containing include " +
                            "directory relative to -sourceBase>");
         System.err.println("    -define <preprocessor flag to be #defined " +
@@ -343,6 +350,12 @@
                               HsArgHandler.VECTOR
                               ),
 
+                new HsArgRule("-altRelativeInclude",
+                              "AltRelativeInclude",
+                              null,
+                              HsArgHandler.VECTOR
+                              ),
+
                 new HsArgRule("-relativeInclude",
                               "RelativeInclude",
                               null,
@@ -355,6 +368,12 @@
                               HsArgHandler.VECTOR
                               ),
 
+                new HsArgRule("-relativeAltSrcInclude",
+                              "RelativeAltSrcInclude",
+                              null,
+                              HsArgHandler.STRING
+                              ),
+
                 new HsArgRule("-relativeSrcInclude",
                               "RelativeSrcInclude",
                               null,
@@ -560,10 +579,6 @@
         allConfigs.add(new TieredFastDebugConfig());
         allConfigs.add(new TieredProductConfig());
 
-        allConfigs.add(new CoreDebugConfig());
-        allConfigs.add(new CoreFastDebugConfig());
-        allConfigs.add(new CoreProductConfig());
-
         return allConfigs;
     }
 
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Fri Aug 16 04:14:12 2013 -0700
@@ -1,3 +1,27 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -24,7 +48,7 @@
     public void writeProjectFile(String projectFileName, String projectName,
             Vector<BuildConfig> allConfigs) throws IOException {
         System.out.println();
-        System.out.print("    Writing .vcxproj file: " + projectFileName);
+        System.out.println("    Writing .vcxproj file: " + projectFileName);
 
         String projDir = Util.normalize(new File(projectFileName).getParent());
 
@@ -114,7 +138,7 @@
 
         endTag();
         printWriter.close();
-        System.out.println("    Done.");
+        System.out.println("    Done writing .vcxproj file.");
 
         writeFilterFile(projectFileName, projectName, allConfigs, projDir);
         writeUserFile(projectFileName, allConfigs);
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Fri Aug 16 04:14:12 2013 -0700
@@ -139,19 +139,22 @@
 
       tagV("Tool", cfg.getV("LinkerFlags"));
 
-      tag("Tool",
-            new String[] {
-            "Name",
-            "VCPostBuildEventTool",
-            "Description",
-            BuildConfig
-            .getFieldString(null, "PostbuildDescription"),
-            // Caution: String.replace(String,String) is available
-            // from JDK5 onwards only
-            "CommandLine",
-            cfg.expandFormat(BuildConfig.getFieldString(null,
-                  "PostbuildCommand").replace("\t",
-                        "&#x0D;&#x0A;")) });
+      String postBuildCmd = BuildConfig.getFieldString(null,
+            "PostbuildCommand");
+      if (postBuildCmd != null) {
+         tag("Tool",
+               new String[] {
+               "Name",
+               "VCPostBuildEventTool",
+               "Description",
+               BuildConfig
+               .getFieldString(null, "PostbuildDescription"),
+               // Caution: String.replace(String,String) is available
+               // from JDK5 onwards only
+               "CommandLine",
+                   cfg.expandFormat(postBuildCmd.replace("\t",
+                           "&#x0D;&#x0A;")) });
+      }
 
       tag("Tool", new String[] { "Name", "VCPreBuildEventTool" });
 
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -2557,6 +2557,26 @@
   *offset = value;
 }
 
+// Support for java_lang_invoke_DirectMethodHandle
+
+int java_lang_invoke_DirectMethodHandle::_member_offset;
+
+oop java_lang_invoke_DirectMethodHandle::member(oop dmh) {
+  oop member_name = NULL;
+  bool is_dmh = dmh->is_oop() && java_lang_invoke_DirectMethodHandle::is_instance(dmh);
+  assert(is_dmh, "a DirectMethodHandle oop is expected");
+  if (is_dmh) {
+    member_name = dmh->obj_field(member_offset_in_bytes());
+  }
+  return member_name;
+}
+
+void java_lang_invoke_DirectMethodHandle::compute_offsets() {
+  Klass* klass_oop = SystemDictionary::DirectMethodHandle_klass();
+  if (klass_oop != NULL && EnableInvokeDynamic) {
+    compute_offset(_member_offset, klass_oop, vmSymbols::member_name(), vmSymbols::java_lang_invoke_MemberName_signature());
+  }
+}
 
 // Support for java_lang_invoke_MethodHandle
 
@@ -3205,6 +3225,7 @@
   java_lang_ThreadGroup::compute_offsets();
   if (EnableInvokeDynamic) {
     java_lang_invoke_MethodHandle::compute_offsets();
+    java_lang_invoke_DirectMethodHandle::compute_offsets();
     java_lang_invoke_MemberName::compute_offsets();
     java_lang_invoke_LambdaForm::compute_offsets();
     java_lang_invoke_MethodType::compute_offsets();
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -976,6 +976,32 @@
   static int form_offset_in_bytes()             { return _form_offset; }
 };
 
+// Interface to java.lang.invoke.DirectMethodHandle objects
+
+class java_lang_invoke_DirectMethodHandle: AllStatic {
+  friend class JavaClasses;
+
+ private:
+  static int _member_offset;               // the MemberName of this DMH
+
+  static void compute_offsets();
+
+ public:
+  // Accessors
+  static oop  member(oop mh);
+
+  // Testers
+  static bool is_subclass(Klass* klass) {
+    return klass->is_subclass_of(SystemDictionary::DirectMethodHandle_klass());
+  }
+  static bool is_instance(oop obj) {
+    return obj != NULL && is_subclass(obj->klass());
+  }
+
+  // Accessors for code generation:
+  static int member_offset_in_bytes()           { return _member_offset; }
+};
+
 // Interface to java.lang.invoke.LambdaForm objects
 // (These are a private interface for managing adapter code generation.)
 
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -151,6 +151,7 @@
   do_klass(reflect_CallerSensitive_klass,               sun_reflect_CallerSensitive,               Opt                 ) \
                                                                                                                          \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */                                            \
+  do_klass(DirectMethodHandle_klass,                    java_lang_invoke_DirectMethodHandle,       Opt                 ) \
   do_klass(MethodHandle_klass,                          java_lang_invoke_MethodHandle,             Pre_JSR292          ) \
   do_klass(MemberName_klass,                            java_lang_invoke_MemberName,               Pre_JSR292          ) \
   do_klass(MethodHandleNatives_klass,                   java_lang_invoke_MethodHandleNatives,      Pre_JSR292          ) \
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -255,6 +255,7 @@
   /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */                                   \
   template(java_lang_invoke_CallSite,                 "java/lang/invoke/CallSite")                \
   template(java_lang_invoke_ConstantCallSite,         "java/lang/invoke/ConstantCallSite")        \
+  template(java_lang_invoke_DirectMethodHandle,       "java/lang/invoke/DirectMethodHandle")      \
   template(java_lang_invoke_MutableCallSite,          "java/lang/invoke/MutableCallSite")         \
   template(java_lang_invoke_VolatileCallSite,         "java/lang/invoke/VolatileCallSite")        \
   template(java_lang_invoke_MethodHandle,             "java/lang/invoke/MethodHandle")            \
@@ -352,6 +353,7 @@
   template(thread_id_name,                            "tid")                                      \
   template(newInstance0_name,                         "newInstance0")                             \
   template(limit_name,                                "limit")                                    \
+  template(member_name,                               "member")                                   \
   template(forName_name,                              "forName")                                  \
   template(forName0_name,                             "forName0")                                 \
   template(isJavaIdentifierStart_name,                "isJavaIdentifierStart")                    \
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -50,6 +50,7 @@
 #include "memory/genMarkSweep.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
+#include "memory/padded.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/tenuredGeneration.hpp"
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -927,11 +927,9 @@
                                    workers->active_workers(),
                                    Threads::number_of_non_daemon_threads());
   workers->set_active_workers(active_workers);
-  _next_gen = gch->next_gen(this);
-  assert(_next_gen != NULL,
-    "This must be the youngest gen, and not the only gen");
   assert(gch->n_gens() == 2,
          "Par collection currently only works with single older gen.");
+  _next_gen = gch->next_gen(this);
   // Do we have to avoid promotion_undo?
   if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
     set_avoid_promotion_undo(true);
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/defNewGeneration.hpp"
+#include "memory/padded.hpp"
 #include "utilities/taskqueue.hpp"
 
 class ChunkArray;
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PAROOPCLOSURES_HPP
 
 #include "memory/genOopClosures.hpp"
+#include "memory/padded.hpp"
 
 // Closures for ParNewGeneration
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -29,14 +29,16 @@
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/mutableSpace.hpp"
+#include "memory/allocation.inline.hpp"
 #include "memory/memRegion.hpp"
+#include "memory/padded.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.psgc.inline.hpp"
 
-PSPromotionManager**         PSPromotionManager::_manager_array = NULL;
-OopStarTaskQueueSet*         PSPromotionManager::_stack_array_depth = NULL;
-PSOldGen*                    PSPromotionManager::_old_gen = NULL;
-MutableSpace*                PSPromotionManager::_young_space = NULL;
+PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
+OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
+PSOldGen*                      PSPromotionManager::_old_gen = NULL;
+MutableSpace*                  PSPromotionManager::_young_space = NULL;
 
 void PSPromotionManager::initialize() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -45,8 +47,10 @@
   _old_gen = heap->old_gen();
   _young_space = heap->young_gen()->to_space();
 
+  // To prevent false sharing, we pad the PSPromotionManagers
+  // and make sure that the first instance starts on a cache-line boundary.
   assert(_manager_array == NULL, "Attempt to initialize twice");
-  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC);
+  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
   guarantee(_manager_array != NULL, "Could not initialize promotion manager");
 
   _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
@@ -54,26 +58,21 @@
 
   // Create and register the PSPromotionManager(s) for the worker threads.
   for(uint i=0; i<ParallelGCThreads; i++) {
-    _manager_array[i] = new PSPromotionManager();
-    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
-    stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
+    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
   }
-
   // The VMThread gets its own PSPromotionManager, which is not available
   // for work stealing.
-  _manager_array[ParallelGCThreads] = new PSPromotionManager();
-  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
 }
 
 PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
   assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
   assert(_manager_array != NULL, "Sanity");
-  return _manager_array[index];
+  return &_manager_array[index];
 }
 
 PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
   assert(_manager_array != NULL, "Sanity");
-  return _manager_array[ParallelGCThreads];
+  return &_manager_array[ParallelGCThreads];
 }
 
 void PSPromotionManager::pre_scavenge() {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -29,6 +29,8 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/allocation.hpp"
+#include "memory/padded.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include "utilities/taskqueue.hpp"
 
 //
@@ -51,14 +53,14 @@
 class PSOldGen;
 class ParCompactionManager;
 
-class PSPromotionManager : public CHeapObj<mtGC> {
+class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
   friend class PSScavenge;
   friend class PSRefProcTaskExecutor;
  private:
-  static PSPromotionManager**         _manager_array;
-  static OopStarTaskQueueSet*         _stack_array_depth;
-  static PSOldGen*                    _old_gen;
-  static MutableSpace*                _young_space;
+  static PaddedEnd<PSPromotionManager>* _manager_array;
+  static OopStarTaskQueueSet*           _stack_array_depth;
+  static PSOldGen*                      _old_gen;
+  static MutableSpace*                  _young_space;
 
 #if TASKQUEUE_STATS
   size_t                              _masked_pushes;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -32,7 +32,7 @@
 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
   assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
-  return _manager_array[index];
+  return &_manager_array[index];
 }
 
 template <class T>
--- a/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -32,6 +32,7 @@
 #if INCLUDE_SERVICES
 
 void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) {
+#if INCLUDE_TRACE
   assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
          "Only call this method if the event is enabled");
 
@@ -42,6 +43,7 @@
   event.set_totalSize(entry->words() * BytesPerWord);
   event.set_endtime(timestamp);
   event.commit();
+#endif // INCLUDE_TRACE
 }
 
 bool ObjectCountEventSender::should_send_event() {
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1209,3 +1209,26 @@
                        size_of_arguments * Interpreter::stackElementSize);
 IRT_END
 #endif
+
+#if INCLUDE_JVMTI
+// This is a support routine for the JVMTI PopFrame interface.
+// Make sure the bytecode is an invokestatic of a polymorphic intrinsic that has
+// a member_name argument, and if so return the member name as a vm_result so
+// that it can be reloaded in the list of invokestatic parameters.
+// The dmh argument is a reference to a DirectMethodHandle that has a member name field.
+IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address dmh,
+                                                            Method* method, address bcp))
+  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
+  if (code != Bytecodes::_invokestatic) {
+    return;
+  }
+  ConstantPool* cpool = method->constants();
+  int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
+  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
+  Symbol* mname = cpool->name_ref_at(cp_index);
+
+  if (MethodHandles::has_member_arg(cname, mname)) {
+    oop member_name = java_lang_invoke_DirectMethodHandle::member((oop)dmh);
+    thread->set_vm_result(member_name);
+  }
+IRT_END
+#endif // INCLUDE_JVMTI
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -95,6 +95,9 @@
   static void    create_exception(JavaThread* thread, char* name, char* message);
   static void    create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
   static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
+#if INCLUDE_JVMTI
+  static void    member_name_arg_or_null(JavaThread* thread, address dmh, Method* m, address bcp);
+#endif
   static void    throw_pending_exception(JavaThread* thread);
 
   // Statics & fields
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -310,46 +310,31 @@
   _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
 }
 
-void CardTableRS::clear_into_younger(Generation* gen) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  // Generations younger than gen have been evacuated. We can clear
-  // card table entries for gen (we know that it has no pointers
-  // to younger gens) and for those below. The card tables for
-  // the youngest gen need never be cleared.
+void CardTableRS::clear_into_younger(Generation* old_gen) {
+  assert(old_gen->level() == 1, "Should only be called for the old generation");
+  // The card tables for the youngest gen need never be cleared.
   // There's a bit of subtlety in the clear() and invalidate()
   // methods that we exploit here and in invalidate_or_clear()
   // below to avoid missing cards at the fringes. If clear() or
   // invalidate() are changed in the future, this code should
   // be revisited. 20040107.ysr
-  Generation* g = gen;
-  for(Generation* prev_gen = gch->prev_gen(g);
-      prev_gen != NULL;
-      g = prev_gen, prev_gen = gch->prev_gen(g)) {
-    MemRegion to_be_cleared_mr = g->prev_used_region();
-    clear(to_be_cleared_mr);
-  }
+  clear(old_gen->prev_used_region());
 }
 
-void CardTableRS::invalidate_or_clear(Generation* gen, bool younger) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  // For each generation gen (and younger)
-  // invalidate the cards for the currently occupied part
-  // of that generation and clear the cards for the
+void CardTableRS::invalidate_or_clear(Generation* old_gen) {
+  assert(old_gen->level() == 1, "Should only be called for the old generation");
+  // Invalidate the cards for the currently occupied part of
+  // the old generation and clear the cards for the
   // unoccupied part of the generation (if any, making use
   // of that generation's prev_used_region to determine that
   // region). No need to do anything for the youngest
   // generation. Also see note#20040107.ysr above.
-  Generation* g = gen;
-  for(Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL;
-      g = prev_gen, prev_gen = gch->prev_gen(g))  {
-    MemRegion used_mr = g->used_region();
-    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
-    if (!to_be_cleared_mr.is_empty()) {
-      clear(to_be_cleared_mr);
-    }
-    invalidate(used_mr);
-    if (!younger) break;
+  MemRegion used_mr = old_gen->used_region();
+  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
+  if (!to_be_cleared_mr.is_empty()) {
+    clear(to_be_cleared_mr);
   }
+  invalidate(used_mr);
 }
 
 
--- a/hotspot/src/share/vm/memory/cardTableRS.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/cardTableRS.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -142,12 +142,12 @@
   void verify_aligned_region_empty(MemRegion mr);
 
   void clear(MemRegion mr) { _ct_bs->clear(mr); }
-  void clear_into_younger(Generation* gen);
+  void clear_into_younger(Generation* old_gen);
 
   void invalidate(MemRegion mr, bool whole_heap = false) {
     _ct_bs->invalidate(mr, whole_heap);
   }
-  void invalidate_or_clear(Generation* gen, bool younger);
+  void invalidate_or_clear(Generation* old_gen);
 
   static uintx ct_max_alignment_constraint() {
     return CardTableModRefBS::ct_max_alignment_constraint();
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -567,8 +567,6 @@
   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 
   _next_gen = gch->next_gen(this);
-  assert(_next_gen != NULL,
-    "This must be the youngest gen, and not the only gen");
 
   // If the next generation is too full to accommodate promotion
   // from this generation, pass on collection; let the next generation
@@ -901,8 +899,6 @@
   if (_next_gen == NULL) {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
     _next_gen = gch->next_gen(this);
-    assert(_next_gen != NULL,
-           "This must be the youngest gen, and not the only gen");
   }
   return _next_gen->promotion_attempt_is_safe(used());
 }
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1070,13 +1070,13 @@
 
 
 void GenCollectedHeap::prepare_for_compaction() {
-  Generation* scanning_gen = _gens[_n_gens-1];
+  guarantee(_n_gens == 2, "Wrong number of generations");
+  Generation* old_gen = _gens[1];
   // Start by compacting into same gen.
-  CompactPoint cp(scanning_gen, NULL, NULL);
-  while (scanning_gen != NULL) {
-    scanning_gen->prepare_for_compaction(&cp);
-    scanning_gen = prev_gen(scanning_gen);
-  }
+  CompactPoint cp(old_gen, NULL, NULL);
+  old_gen->prepare_for_compaction(&cp);
+  Generation* young_gen = _gens[0];
+  young_gen->prepare_for_compaction(&cp);
 }
 
 GCStats* GenCollectedHeap::gc_stats(int level) const {
@@ -1245,27 +1245,14 @@
   generation_iterate(&ep_cl, false);
 }
 
-oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
+oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                               oop obj,
                                               size_t obj_size) {
+  guarantee(old_gen->level() == 1, "We only get here with an old generation");
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   HeapWord* result = NULL;
 
-  // First give each higher generation a chance to allocate the promoted object.
-  Generation* allocator = next_gen(gen);
-  if (allocator != NULL) {
-    do {
-      result = allocator->allocate(obj_size, false);
-    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
-  }
-
-  if (result == NULL) {
-    // Then give gen and higher generations a chance to expand and allocate the
-    // object.
-    do {
-      result = gen->expand_and_allocate(obj_size, false);
-    } while (result == NULL && (gen = next_gen(gen)) != NULL);
-  }
+  result = old_gen->expand_and_allocate(obj_size, false);
 
   if (result != NULL) {
     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -368,25 +368,23 @@
   // collection.
   virtual bool is_maximal_no_gc() const;
 
-  // Return the generation before "gen", or else NULL.
+  // Return the generation before "gen".
   Generation* prev_gen(Generation* gen) const {
     int l = gen->level();
-    if (l == 0) return NULL;
-    else return _gens[l-1];
+    guarantee(l > 0, "Out of bounds");
+    return _gens[l-1];
   }
 
-  // Return the generation after "gen", or else NULL.
+  // Return the generation after "gen".
   Generation* next_gen(Generation* gen) const {
     int l = gen->level() + 1;
-    if (l == _n_gens) return NULL;
-    else return _gens[l];
+    guarantee(l < _n_gens, "Out of bounds");
+    return _gens[l];
   }
 
   Generation* get_gen(int i) const {
-    if (i >= 0 && i < _n_gens)
-      return _gens[i];
-    else
-      return NULL;
+    guarantee(i >= 0 && i < _n_gens, "Out of bounds");
+    return _gens[i];
   }
 
   int n_gens() const {
@@ -485,9 +483,9 @@
 
   // Promotion of obj into gen failed.  Try to promote obj to higher
   // gens in ascending order; return the new location of obj if successful.
-  // Otherwise, try expand-and-allocate for obj in each generation starting at
-  // gen; return the new location of obj if successful.  Otherwise, return NULL.
-  oop handle_failed_promotion(Generation* gen,
+  // Otherwise, try expand-and-allocate for obj in the old generation;
+  // return the new location of obj if successful.  Otherwise, return NULL.
+  oop handle_failed_promotion(Generation* old_gen,
                               oop obj,
                               size_t obj_size);
 
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -52,8 +52,8 @@
 #include "utilities/copy.hpp"
 #include "utilities/events.hpp"
 
-void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
-  bool clear_all_softrefs) {
+void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
+  guarantee(level == 1, "We always collect both old and young.");
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -84,11 +84,6 @@
   // Capture heap size before collection for printing.
   size_t gch_prev_used = gch->used();
 
-  // Some of the card table updates below assume that the perm gen is
-  // also being collected.
-  assert(level == gch->n_gens() - 1,
-         "All generations are being collected, ergo perm gen too.");
-
   // Capture used regions for each generation that will be
   // subject to collection, so that card table adjustments can
   // be made intelligently (see clear / invalidate further below).
@@ -126,17 +121,15 @@
     all_empty = all_empty && gch->get_gen(i)->used() == 0;
   }
   GenRemSet* rs = gch->rem_set();
+  Generation* old_gen = gch->get_gen(level);
   // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
   if (all_empty) {
     // We've evacuated all generations below us.
-    Generation* g = gch->get_gen(level);
-    rs->clear_into_younger(g);
+    rs->clear_into_younger(old_gen);
   } else {
     // Invalidate the cards corresponding to the currently used
-    // region and clear those corresponding to the evacuated region
-    // of all generations just collected (i.e. level and younger).
-    rs->invalidate_or_clear(gch->get_gen(level),
-                            true /* younger */);
+    // region and clear those corresponding to the evacuated region.
+    rs->invalidate_or_clear(old_gen);
   }
 
   Threads::gc_epilogue();
--- a/hotspot/src/share/vm/memory/genRemSet.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/genRemSet.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -135,7 +135,7 @@
   // younger than gen from generations gen and older.
   // The parameter clear_perm indicates if the perm_gen's
   // remembered set should also be processed/cleared.
-  virtual void clear_into_younger(Generation* gen) = 0;
+  virtual void clear_into_younger(Generation* old_gen) = 0;
 
   // Informs the RS that refs in the given "mr" may have changed
   // arbitrarily, and therefore may contain old-to-young pointers.
@@ -146,11 +146,8 @@
 
   // Informs the RS that refs in this generation
   // may have changed arbitrarily, and therefore may contain
-  // old-to-young pointers in arbitrary locations. The parameter
-  // younger indicates if the same should be done for younger generations
-  // as well. The parameter perm indicates if the same should be done for
-  // perm gen as well.
-  virtual void invalidate_or_clear(Generation* gen, bool younger) = 0;
+  // old-to-young pointers in arbitrary locations.
+  virtual void invalidate_or_clear(Generation* old_gen) = 0;
 };
 
 #endif // SHARE_VM_MEMORY_GENREMSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/padded.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_PADDED_HPP
+#define SHARE_VM_MEMORY_PADDED_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
+// expected cache line size (a power of two).  The first addend avoids sharing
+// when the start address is not a multiple of alignment; the second maintains
+// alignment of starting addresses that happen to be a multiple.
+#define PADDING_SIZE(type, alignment)                           \
+  ((alignment) + align_size_up_(sizeof(type), alignment))
+
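+// Illustrative arithmetic (an added example, not part of the original change):
+// with sizeof(type) == 24 and alignment == 64, align_size_up_(24, 64) == 64,
+// so PADDING_SIZE(type, 64) == 64 + 64 == 128 pad bytes.
+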
+// Templates to create a subclass padded to avoid cache line sharing.  These are
+// effective only when applied to derived-most (leaf) classes.
+
+// When no args are passed to the base ctor.
+template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded : public T {
+ private:
+  char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
+
+// When either 0 or 1 args may be passed to the base ctor.
+template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded01 : public T {
+ public:
+  Padded01(): T() { }
+  Padded01(Arg1T arg1): T(arg1) { }
+ private:
+  char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
+
+// Super class of PaddedEnd when pad_size != 0.
+template <class T, size_t pad_size>
+class PaddedEndImpl : public T {
+ private:
+  char _pad_buf[pad_size];
+};
+
+// Super class of PaddedEnd when pad_size == 0.
+template <class T>
+class PaddedEndImpl<T, /*pad_size*/ 0> : public T {
+  // No padding.
+};
+
+#define PADDED_END_SIZE(type, alignment) (align_size_up_(sizeof(type), alignment) - sizeof(type))
+
+// A more memory-conservative implementation of Padded: the subclass adds only
+// the minimal amount of padding needed to make the size of the objects aligned.
+// This helps reduce false sharing, provided the start address is a multiple
+// of alignment.
+template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedEnd : public PaddedEndImpl<T, PADDED_END_SIZE(T, alignment)> {
+  // C++ doesn't allow zero-length arrays, so the padding is put in a
+  // super class that is specialized for the pad_size == 0 case.
+};
+
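+// Illustrative arithmetic (an added example, not part of the original change):
+// with sizeof(T) == 24 and alignment == 64, PADDED_END_SIZE(T, 64) == 40 and
+// sizeof(PaddedEnd<T>) == 64; when sizeof(T) is already a multiple of the
+// alignment, the pad_size == 0 specialization above contributes no padding.
+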
+// Helper class to create an array of PaddedEnd<T> objects. All elements will
+// start at a multiple of alignment and the size will be aligned to alignment.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedArray {
+ public:
+  // Creates an aligned padded array.
+  // The memory can't be deleted since the raw memory chunk is not returned.
+  static PaddedEnd<T>* create_unfreeable(uint length);
+};
+
+#endif // SHARE_VM_MEMORY_PADDED_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/padded.inline.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "memory/allocation.inline.hpp"
+#include "memory/padded.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Creates an aligned padded array.
+// The memory can't be deleted since the raw memory chunk is not returned.
+template <class T, MEMFLAGS flags, size_t alignment>
+PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
+  // Check that the PaddedEnd class works as intended.
+  STATIC_ASSERT(is_size_aligned_(sizeof(PaddedEnd<T>), alignment));
+
+  // Allocate a chunk of memory large enough to allow for some alignment.
+  void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);
+
+  // Make the initial alignment.
+  PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_pointer_up(chunk, alignment);
+
+  // Call the default constructor for each element.
+  for (uint i = 0; i < length; i++) {
+    ::new (&aligned_padded_array[i]) T();
+  }
+
+  return aligned_padded_array;
+}
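+
+// Usage sketch (mirroring the PSPromotionManager change earlier in this
+// changeset; not part of the original file):
+//   _manager_array =
+//       PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);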
--- a/hotspot/src/share/vm/memory/universe.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -105,10 +105,9 @@
 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
 oop Universe::_the_null_string                        = NULL;
 oop Universe::_the_min_jint_string                   = NULL;
-LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
-LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
-LatestMethodOopCache* Universe::_pd_implies_cache         = NULL;
-ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
+LatestMethodCache* Universe::_finalizer_register_cache = NULL;
+LatestMethodCache* Universe::_loader_addClass_cache    = NULL;
+LatestMethodCache* Universe::_pd_implies_cache         = NULL;
 oop Universe::_out_of_memory_error_java_heap          = NULL;
 oop Universe::_out_of_memory_error_metaspace          = NULL;
 oop Universe::_out_of_memory_error_class_metaspace    = NULL;
@@ -225,7 +224,6 @@
   f->do_ptr((void**)&_the_empty_klass_array);
   _finalizer_register_cache->serialize(f);
   _loader_addClass_cache->serialize(f);
-  _reflect_invoke_cache->serialize(f);
   _pd_implies_cache->serialize(f);
 }
 
@@ -649,10 +647,9 @@
 
   // We have a heap so create the Method* caches before
   // Metaspace::initialize_shared_spaces() tries to populate them.
-  Universe::_finalizer_register_cache = new LatestMethodOopCache();
-  Universe::_loader_addClass_cache    = new LatestMethodOopCache();
-  Universe::_pd_implies_cache         = new LatestMethodOopCache();
-  Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();
+  Universe::_finalizer_register_cache = new LatestMethodCache();
+  Universe::_loader_addClass_cache    = new LatestMethodCache();
+  Universe::_pd_implies_cache         = new LatestMethodCache();
 
   if (UseSharedSpaces) {
     // Read the data structures supporting the shared spaces (shared
@@ -1088,35 +1085,21 @@
                                   vmSymbols::register_method_name(),
                                   vmSymbols::register_method_signature());
   if (m == NULL || !m->is_static()) {
-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
-      "java.lang.ref.Finalizer.register", false);
+    tty->print_cr("Unable to link/verify Finalizer.register method");
+    return false; // initialization failed (cannot throw exception yet)
   }
   Universe::_finalizer_register_cache->init(
-    SystemDictionary::Finalizer_klass(), m, CHECK_false);
-
-  // Resolve on first use and initialize class.
-  // Note: No race-condition here, since a resolve will always return the same result
-
-  // Setup method for security checks
-  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
-  k_h = instanceKlassHandle(THREAD, k);
-  k_h->link_class(CHECK_false);
-  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
-  if (m == NULL || m->is_static()) {
-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
-      "java.lang.reflect.Method.invoke", false);
-  }
-  Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
+    SystemDictionary::Finalizer_klass(), m);
 
   // Setup method for registering loaded classes in class loader vector
   InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
   m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
   if (m == NULL || m->is_static()) {
-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
-      "java.lang.ClassLoader.addClass", false);
+    tty->print_cr("Unable to link/verify ClassLoader.addClass method");
+    return false; // initialization failed (cannot throw exception yet)
   }
   Universe::_loader_addClass_cache->init(
-    SystemDictionary::ClassLoader_klass(), m, CHECK_false);
+    SystemDictionary::ClassLoader_klass(), m);
 
   // Setup method for checking protection domain
   InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
@@ -1132,7 +1115,7 @@
       return false; // initialization failed
     }
     Universe::_pd_implies_cache->init(
-      SystemDictionary::ProtectionDomain_klass(), m, CHECK_false);;
+      SystemDictionary::ProtectionDomain_klass(), m);
   }
 
   // The following is initializing converter functions for serialization in
@@ -1455,7 +1438,7 @@
 }
 
 
-void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
+void LatestMethodCache::init(Klass* k, Method* m) {
   if (!UseSharedSpaces) {
     _klass = k;
   }
@@ -1471,88 +1454,7 @@
 }
 
 
-ActiveMethodOopsCache::~ActiveMethodOopsCache() {
-  if (_prev_methods != NULL) {
-    delete _prev_methods;
-    _prev_methods = NULL;
-  }
-}
-
-
-void ActiveMethodOopsCache::add_previous_version(Method* method) {
-  assert(Thread::current()->is_VM_thread(),
-    "only VMThread can add previous versions");
-
-  // Only append the previous method if it is executing on the stack.
-  if (method->on_stack()) {
-
-    if (_prev_methods == NULL) {
-      // This is the first previous version so make some space.
-      // Start with 2 elements under the assumption that the class
-      // won't be redefined much.
-      _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
-    }
-
-    // RC_TRACE macro has an embedded ResourceMark
-    RC_TRACE(0x00000100,
-      ("add: %s(%s): adding prev version ref for cached method @%d",
-        method->name()->as_C_string(), method->signature()->as_C_string(),
-        _prev_methods->length()));
-
-    _prev_methods->append(method);
-  }
-
-
-  // Since the caller is the VMThread and we are at a safepoint, this is a good
-  // time to clear out unused method references.
-
-  if (_prev_methods == NULL) return;
-
-  for (int i = _prev_methods->length() - 1; i >= 0; i--) {
-    Method* method = _prev_methods->at(i);
-    assert(method != NULL, "weak method ref was unexpectedly cleared");
-
-    if (!method->on_stack()) {
-      // This method isn't running anymore so remove it
-      _prev_methods->remove_at(i);
-      MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
-    } else {
-      // RC_TRACE macro has an embedded ResourceMark
-      RC_TRACE(0x00000400,
-        ("add: %s(%s): previous cached method @%d is alive",
-         method->name()->as_C_string(), method->signature()->as_C_string(), i));
-    }
-  }
-} // end add_previous_version()
-
-
-bool ActiveMethodOopsCache::is_same_method(const Method* method) const {
-  InstanceKlass* ik = InstanceKlass::cast(klass());
-  const Method* check_method = ik->method_with_idnum(method_idnum());
-  assert(check_method != NULL, "sanity check");
-  if (check_method == method) {
-    // done with the easy case
-    return true;
-  }
-
-  if (_prev_methods != NULL) {
-    // The cached method has been redefined at least once so search
-    // the previous versions for a match.
-    for (int i = 0; i < _prev_methods->length(); i++) {
-      check_method = _prev_methods->at(i);
-      if (check_method == method) {
-        // a previous version matches
-        return true;
-      }
-    }
-  }
-
-  // either no previous versions or no previous version matched
-  return false;
-}
-
-
-Method* LatestMethodOopCache::get_Method() {
+Method* LatestMethodCache::get_method() {
   if (klass() == NULL) return NULL;
   InstanceKlass* ik = InstanceKlass::cast(klass());
   Method* m = ik->method_with_idnum(method_idnum());
--- a/hotspot/src/share/vm/memory/universe.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/memory/universe.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -41,10 +41,11 @@
 class DeferredObjAllocEvent;
 
 
-// Common parts of a Method* cache. This cache safely interacts with
-// the RedefineClasses API.
-//
-class CommonMethodOopCache : public CHeapObj<mtClass> {
+// A helper class for caching a Method* when the user of the cache
+// only cares about the latest version of the Method*.  This cache safely
+// interacts with the RedefineClasses API.
+
+class LatestMethodCache : public CHeapObj<mtClass> {
   // We save the Klass* and the idnum of Method* in order to get
   // the current cached Method*.
  private:
@@ -52,12 +53,14 @@
   int                   _method_idnum;
 
  public:
-  CommonMethodOopCache()   { _klass = NULL; _method_idnum = -1; }
-  ~CommonMethodOopCache()  { _klass = NULL; _method_idnum = -1; }
+  LatestMethodCache()   { _klass = NULL; _method_idnum = -1; }
+  ~LatestMethodCache()  { _klass = NULL; _method_idnum = -1; }
 
-  void     init(Klass* k, Method* m, TRAPS);
-  Klass* klass() const         { return _klass; }
-  int      method_idnum() const  { return _method_idnum; }
+  void   init(Klass* k, Method* m);
+  Klass* klass() const           { return _klass; }
+  int    method_idnum() const    { return _method_idnum; }
+
+  Method* get_method();
 
   // Enhanced Class Redefinition support
   void classes_do(void f(Klass*)) {
@@ -72,39 +75,6 @@
 };
 
 
-// A helper class for caching a Method* when the user of the cache
-// cares about all versions of the Method*.
-//
-class ActiveMethodOopsCache : public CommonMethodOopCache {
-  // This subclass adds weak references to older versions of the
-  // Method* and a query method for a Method*.
-
- private:
-  // If the cached Method* has not been redefined, then
-  // _prev_methods will be NULL. If all of the previous
-  // versions of the method have been collected, then
-  // _prev_methods can have a length of zero.
-  GrowableArray<Method*>* _prev_methods;
-
- public:
-  ActiveMethodOopsCache()   { _prev_methods = NULL; }
-  ~ActiveMethodOopsCache();
-
-  void add_previous_version(Method* method);
-  bool is_same_method(const Method* method) const;
-};
-
-
-// A helper class for caching a Method* when the user of the cache
-// only cares about the latest version of the Method*.
-//
-class LatestMethodOopCache : public CommonMethodOopCache {
-  // This subclass adds a getter method for the latest Method*.
-
- public:
-  Method* get_Method();
-};
-
 // For UseCompressedOops and UseCompressedKlassPointers.
 struct NarrowPtrStruct {
   // Base address for oop/klass-within-java-object materialization.
@@ -174,10 +144,10 @@
   static objArrayOop  _the_empty_class_klass_array;   // Canonicalized obj array of type java.lang.Class
   static oop          _the_null_string;               // A cache of "null" as a Java string
   static oop          _the_min_jint_string;          // A cache of "-2147483648" as a Java string
-  static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects
-  static LatestMethodOopCache* _loader_addClass_cache;    // method for registering loaded classes in class loader vector
-  static LatestMethodOopCache* _pd_implies_cache;         // method for checking protection domain attributes
-  static ActiveMethodOopsCache* _reflect_invoke_cache;    // method for security checks
+  static LatestMethodCache* _finalizer_register_cache; // static method for registering finalizable objects
+  static LatestMethodCache* _loader_addClass_cache;    // method for registering loaded classes in class loader vector
+  static LatestMethodCache* _pd_implies_cache;         // method for checking protection domain attributes
+
   // preallocated error objects (no backtrace)
   static oop          _out_of_memory_error_java_heap;
   static oop          _out_of_memory_error_metaspace;
@@ -334,11 +304,11 @@
   static Array<Klass*>* the_array_interfaces_array() { return _the_array_interfaces_array;   }
   static oop          the_null_string()               { return _the_null_string;               }
   static oop          the_min_jint_string()          { return _the_min_jint_string;          }
-  static Method*      finalizer_register_method()     { return _finalizer_register_cache->get_Method(); }
-  static Method*      loader_addClass_method()        { return _loader_addClass_cache->get_Method(); }
 
-  static Method*      protection_domain_implies_method() { return _pd_implies_cache->get_Method(); }
-  static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }
+  static Method*      finalizer_register_method()     { return _finalizer_register_cache->get_method(); }
+  static Method*      loader_addClass_method()        { return _loader_addClass_cache->get_method(); }
+
+  static Method*      protection_domain_implies_method() { return _pd_implies_cache->get_method(); }
 
   static oop          null_ptr_exception_instance()   { return _null_ptr_exception_instance;   }
   static oop          arithmetic_exception_instance() { return _arithmetic_exception_instance; }
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -981,7 +981,6 @@
 bool Method::is_ignored_by_security_stack_walk() const {
   const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
 
-  assert(intrinsic_id() != vmIntrinsics::_invoke || Universe::reflect_invoke_cache()->is_same_method((Method*)this), "sanity");
   if (intrinsic_id() == vmIntrinsics::_invoke) {
     // This is Method.invoke() -- ignore it
     return true;
--- a/hotspot/src/share/vm/opto/block.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/block.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -221,7 +221,7 @@
 //------------------------------is_uncommon------------------------------------
 // True if block is low enough frequency or guarded by a test which
 // mostly does not go here.
-bool Block::is_uncommon( Block_Array &bbs ) const {
+bool Block::is_uncommon(PhaseCFG* cfg) const {
   // Initial blocks must never be moved, so are never uncommon.
   if (head()->is_Root() || head()->is_Start())  return false;
 
@@ -238,7 +238,7 @@
   uint uncommon_for_freq_preds = 0;
 
   for( uint i=1; i<num_preds(); i++ ) {
-    Block* guard = bbs[pred(i)->_idx];
+    Block* guard = cfg->get_block_for_node(pred(i));
     // Check to see if this block follows its guard 1 time out of 10000
     // or less.
     //
@@ -285,11 +285,11 @@
   }
 }
 
-void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
+void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
   if (is_connector()) {
     for (uint i=1; i<num_preds(); i++) {
-      Block *p = ((*bbs)[pred(i)->_idx]);
-      p->dump_pred(bbs, orig, st);
+      Block *p = cfg->get_block_for_node(pred(i));
+      p->dump_pred(cfg, orig, st);
     }
   } else {
     dump_bidx(orig, st);
@@ -297,7 +297,7 @@
   }
 }
 
-void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
+void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
   // Print the basic block
   dump_bidx(this, st);
   st->print(": #\t");
@@ -311,26 +311,28 @@
   if( head()->is_block_start() ) {
     for (uint i=1; i<num_preds(); i++) {
       Node *s = pred(i);
-      if (bbs) {
-        Block *p = (*bbs)[s->_idx];
-        p->dump_pred(bbs, p, st);
+      if (cfg != NULL) {
+        Block *p = cfg->get_block_for_node(s);
+        p->dump_pred(cfg, p, st);
       } else {
         while (!s->is_block_start())
           s = s->in(0);
         st->print("N%d ", s->_idx );
       }
     }
-  } else
+  } else {
     st->print("BLOCK HEAD IS JUNK  ");
+  }
 
   // Print loop, if any
   const Block *bhead = this;    // Head of self-loop
   Node *bh = bhead->head();
-  if( bbs && bh->is_Loop() && !head()->is_Root() ) {
+
+  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
     LoopNode *loop = bh->as_Loop();
-    const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
+    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
     while (bx->is_connector()) {
-      bx = (*bbs)[bx->pred(1)->_idx];
+      bx = cfg->get_block_for_node(bx->pred(1));
     }
     st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
     // Dump any loop-specific bits, especially for CountedLoops.
@@ -349,29 +351,32 @@
   st->print_cr("");
 }
 
-void Block::dump() const { dump(NULL); }
+void Block::dump() const {
+  dump(NULL);
+}
 
-void Block::dump( const Block_Array *bbs ) const {
-  dump_head(bbs);
-  uint cnt = _nodes.size();
-  for( uint i=0; i<cnt; i++ )
+void Block::dump(const PhaseCFG* cfg) const {
+  dump_head(cfg);
+  for (uint i = 0; i < _nodes.size(); i++) {
     _nodes[i]->dump();
+  }
   tty->print("\n");
 }
 #endif
 
 //=============================================================================
 //------------------------------PhaseCFG---------------------------------------
-PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
-  Phase(CFG),
-  _bbs(a),
-  _root(r),
-  _node_latency(NULL)
+PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
+: Phase(CFG)
+, _block_arena(arena)
+, _node_to_block_mapping(arena)
+, _root(root)
+, _node_latency(NULL)
 #ifndef PRODUCT
-  , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
+, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
 #endif
 #ifdef ASSERT
-  , _raw_oops(a)
+, _raw_oops(arena)
 #endif
 {
   ResourceMark rm;
@@ -380,13 +385,13 @@
   // Node on demand.
   Node *x = new (C) GotoNode(NULL);
   x->init_req(0, x);
-  _goto = m.match_tree(x);
+  _goto = matcher.match_tree(x);
   assert(_goto != NULL, "");
   _goto->set_req(0,_goto);
 
   // Build the CFG in Reverse Post Order
   _num_blocks = build_cfg();
-  _broot = _bbs[_root->_idx];
+  _broot = get_block_for_node(_root);
 }
 
 //------------------------------build_cfg--------------------------------------
@@ -440,9 +445,9 @@
       // 'p' now points to the start of this basic block
 
       // Put self in array of basic blocks
-      Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
-      _bbs.map(p->_idx,bb);
-      _bbs.map(x->_idx,bb);
+      Block *bb = new (_block_arena) Block(_block_arena, p);
+      map_node_to_block(p, bb);
+      map_node_to_block(x, bb);
       if( x != p ) {                // Only for root is x == p
         bb->_nodes.push((Node*)x);
       }
@@ -473,16 +478,16 @@
       // Check if it is the first node pushed on the stack at the beginning.
       if (idx == 0) break;          // end of the build
       // Find predecessor basic block
-      Block *pb = _bbs[x->_idx];
+      Block *pb = get_block_for_node(x);
       // Insert into nodes array, if not already there
-      if( !_bbs.lookup(proj->_idx) ) {
+      if (!has_block(proj)) {
         assert( x != proj, "" );
         // Map basic block of projection
-        _bbs.map(proj->_idx,pb);
+        map_node_to_block(proj, pb);
         pb->_nodes.push(proj);
       }
       // Insert self as a child of my predecessor block
-      pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
+      pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
       assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
               "too many control users, not a CFG?" );
     }
@@ -511,15 +516,15 @@
   RegionNode* region = new (C) RegionNode(2);
   region->init_req(1, proj);
   // setup corresponding basic block
-  Block* block = new (_bbs._arena) Block(_bbs._arena, region);
-  _bbs.map(region->_idx, block);
+  Block* block = new (_block_arena) Block(_block_arena, region);
+  map_node_to_block(region, block);
   C->regalloc()->set_bad(region->_idx);
   // add a goto node
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, region);
   // add it to the basic block
   block->_nodes.push(gto);
-  _bbs.map(gto->_idx, block);
+  map_node_to_block(gto, block);
   C->regalloc()->set_bad(gto->_idx);
   // hook up successor block
   block->_succs.map(block->_num_succs++, out);
@@ -570,7 +575,7 @@
   gto->set_req(0, b->head());
   Node *bp = b->_nodes[end_idx];
   b->_nodes.map(end_idx,gto); // Slam over NeverBranch
-  _bbs.map(gto->_idx, b);
+  map_node_to_block(gto, b);
   C->regalloc()->set_bad(gto->_idx);
   b->_nodes.pop();              // Yank projections
   b->_nodes.pop();              // Yank projections
@@ -613,7 +618,7 @@
   // If the previous block conditionally falls into bx, return false,
   // because moving bx will create an extra jump.
   for(uint k = 1; k < bx->num_preds(); k++ ) {
-    Block* pred = _bbs[bx->pred(k)->_idx];
+    Block* pred = get_block_for_node(bx->pred(k));
     if (pred == _blocks[bx_index-1]) {
       if (pred->_num_succs != 1) {
         return false;
@@ -682,7 +687,7 @@
 
     // Look for uncommon blocks and move to end.
     if (!C->do_freq_based_layout()) {
-      if( b->is_uncommon(_bbs) ) {
+      if (b->is_uncommon(this)) {
         move_to_end(b, i);
         last--;                   // No longer check for being uncommon!
         if( no_flip_branch(b) ) { // Fall-thru case must follow?
@@ -870,28 +875,31 @@
   } while( !p->is_block_start() );
 
   // Recursively visit
-  for( uint i=1; i<p->req(); i++ )
-    _dump_cfg(p->in(i),visited);
+  for (uint i = 1; i < p->req(); i++) {
+    _dump_cfg(p->in(i), visited);
+  }
 
   // Dump the block
-  _bbs[p->_idx]->dump(&_bbs);
+  get_block_for_node(p)->dump(this);
 }
 
 void PhaseCFG::dump( ) const {
   tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
-  if( _blocks.size() ) {        // Did we do basic-block layout?
-    for( uint i=0; i<_num_blocks; i++ )
-      _blocks[i]->dump(&_bbs);
+  if (_blocks.size()) {        // Did we do basic-block layout?
+    for (uint i = 0; i < _num_blocks; i++) {
+      _blocks[i]->dump(this);
+    }
   } else {                      // Else do it with a DFS
-    VectorSet visited(_bbs._arena);
+    VectorSet visited(_block_arena);
     _dump_cfg(_root,visited);
   }
 }
 
 void PhaseCFG::dump_headers() {
   for( uint i = 0; i < _num_blocks; i++ ) {
-    if( _blocks[i] == NULL ) continue;
-    _blocks[i]->dump_head(&_bbs);
+    if (_blocks[i]) {
+      _blocks[i]->dump_head(this);
+    }
   }
 }
 
@@ -904,7 +912,7 @@
     uint j;
     for (j = 0; j < cnt; j++)  {
       Node *n = b->_nodes[j];
-      assert( _bbs[n->_idx] == b, "" );
+      assert(get_block_for_node(n) == b, "");
       if (j >= 1 && n->is_Mach() &&
           n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
         assert(j == 1 || b->_nodes[j-1]->is_Phi(),
@@ -913,13 +921,12 @@
       for (uint k = 0; k < n->req(); k++) {
         Node *def = n->in(k);
         if (def && def != n) {
-          assert(_bbs[def->_idx] || def->is_Con(),
-                 "must have block; constants for debug info ok");
+          assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
           // Verify that instructions in the block are in correct order.
           // Uses must follow their definition if they are at the same block.
           // Mostly done to check that MachSpillCopy nodes are placed correctly
           // when CreateEx node is moved in build_ifg_physical().
-          if (_bbs[def->_idx] == b &&
+          if (get_block_for_node(def) == b &&
               !(b->head()->is_Loop() && n->is_Phi()) &&
               // See (+++) comment in reg_split.cpp
               !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
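
Every hunk in this file applies the same substitution, so one before/after
sketch covers the pattern; `cfg` and `n` are hypothetical locals:

    Block* b_old = bbs[n->_idx];                // old idiom: raw Block_Array access
    Block* b_new = cfg->get_block_for_node(n);  // new idiom: accessor on the CFG
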
--- a/hotspot/src/share/vm/opto/block.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/block.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -48,13 +48,12 @@
   friend class VMStructs;
   uint _size;                   // allocated size, as opposed to formal limit
   debug_only(uint _limit;)      // limit to formal domain
+  Arena *_arena;                // Arena to allocate in
 protected:
   Block **_blocks;
   void grow( uint i );          // Grow array node to fit
 
 public:
-  Arena *_arena;                // Arena to allocate in
-
   Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
     debug_only(_limit=0);
     _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
@@ -77,7 +76,7 @@
 public:
   uint _cnt;
   Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
-  void push( Block *b ) { map(_cnt++,b); }
+  void push( Block *b ) {  map(_cnt++,b); }
   Block *pop() { return _blocks[--_cnt]; }
   Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
   void remove( uint i );
@@ -284,15 +283,15 @@
   // helper function that adds caller save registers to MachProjNode
   void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
+  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
 
   // Perform basic-block local scheduling
   Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
-  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
-  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
+  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
+  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
   bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
   // Cleanup if any code lands between a Call and its Catch
-  void call_catch_cleanup(Block_Array &bbs, Compile *C);
+  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
   // Detect implicit-null-check opportunities.  Basically, find NULL checks
   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
   // I can generate a memory op if there is not one nearby.
@@ -331,15 +330,15 @@
 
   // Use frequency calculations and code shape to predict if the block
   // is uncommon.
-  bool is_uncommon( Block_Array &bbs ) const;
+  bool is_uncommon(PhaseCFG* cfg) const;
 
 #ifndef PRODUCT
   // Debugging print of basic block
   void dump_bidx(const Block* orig, outputStream* st = tty) const;
-  void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const;
-  void dump_head( const Block_Array *bbs, outputStream* st = tty ) const;
+  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
+  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
   void dump() const;
-  void dump( const Block_Array *bbs ) const;
+  void dump(const PhaseCFG* cfg) const;
 #endif
 };
 
@@ -349,6 +348,12 @@
 class PhaseCFG : public Phase {
   friend class VMStructs;
  private:
+  // Arena for the blocks to be stored in
+  Arena* _block_arena;
+
+  // Map nodes to owning basic block
+  Block_Array _node_to_block_mapping;
+
   // Build a proper looking cfg.  Return count of basic blocks
   uint build_cfg();
 
@@ -371,22 +376,42 @@
 
   Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
   void verify_anti_dependences(Block* LCA, Node* load) {
-    assert(LCA == _bbs[load->_idx], "should already be scheduled");
+    assert(LCA == get_block_for_node(load), "should already be scheduled");
     insert_anti_dependences(LCA, load, true);
   }
 
  public:
-  PhaseCFG( Arena *a, RootNode *r, Matcher &m );
+  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);
 
   uint _num_blocks;             // Count of basic blocks
   Block_List _blocks;           // List of basic blocks
   RootNode *_root;              // Root of whole program
-  Block_Array _bbs;             // Map Nodes to owning Basic Block
   Block *_broot;                // Basic block of root
   uint _rpo_ctr;
   CFGLoop* _root_loop;
   float _outer_loop_freq;       // Outmost loop frequency
 
+
+  // set which block this node should reside in
+  void map_node_to_block(const Node* node, Block* block) {
+    _node_to_block_mapping.map(node->_idx, block);
+  }
+
+  // removes the mapping from a node to a block
+  void unmap_node_from_block(const Node* node) {
+    _node_to_block_mapping.map(node->_idx, NULL);
+  }
+
+  // get the block in which this node resides
+  Block* get_block_for_node(const Node* node) const {
+    return _node_to_block_mapping[node->_idx];
+  }
+
+  // does this node reside in a block; return true if it does
+  bool has_block(const Node* node) const {
+    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
+  }
+
   // Per node latency estimation, valid only during GCM
   GrowableArray<uint> *_node_latency;
 
@@ -405,7 +430,7 @@
   void Estimate_Block_Frequency();
 
   // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
-  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
+  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
   void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
 
   // Compute the (backwards) latency of a node from the uses
@@ -454,7 +479,7 @@
   // Insert a node into a block, and update the node-to-block mapping
   void insert( Block *b, uint idx, Node *n ) {
     b->_nodes.insert( idx, n );
-    _bbs.map( n->_idx, b );
+    map_node_to_block(n, b);
   }
 
 #ifndef PRODUCT
@@ -543,7 +568,7 @@
     _child(NULL),
     _exit_prob(1.0f) {}
   CFGLoop* parent() { return _parent; }
-  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
+  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
   void add_member(CFGElement *s) { _members.push(s); }
   void add_nested_loop(CFGLoop* cl);
   Block* head() {
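
The four methods added to PhaseCFG above form the complete node-to-block
mapping API. A usage sketch with placeholder names `cfg`, `node`, and `block`:

    if (!cfg->has_block(node)) {              // any mapping yet?
      cfg->map_node_to_block(node, block);    // establish block ownership
    }
    Block* owner = cfg->get_block_for_node(node);
    assert(owner == block, "mapping round-trips");
    cfg->unmap_node_from_block(node);         // resets the entry to NULL
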
--- a/hotspot/src/share/vm/opto/buildOopMap.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/buildOopMap.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -426,14 +426,16 @@
   }
   memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
   // Push preds onto worklist
-  for( uint i=1; i<root->req(); i++ )
-    worklist->push(cfg->_bbs[root->in(i)->_idx]);
+  for (uint i = 1; i < root->req(); i++) {
+    Block* block = cfg->get_block_for_node(root->in(i));
+    worklist->push(block);
+  }
 
   // ZKM.jar includes tiny infinite loops which are unreached from below.
   // If we missed any blocks, we'll retry here after pushing all missed
   // blocks on the worklist.  Normally this outer loop never trips more
   // than once.
-  while( 1 ) {
+  while (1) {
 
     while( worklist->size() ) { // Standard worklist algorithm
       Block *b = worklist->rpop();
@@ -537,8 +539,10 @@
         for( l=0; l<max_reg_ints; l++ )
           old_live[l] = tmp_live[l];
         // Push preds onto worklist
-        for( l=1; l<(int)b->num_preds(); l++ )
-          worklist->push(cfg->_bbs[b->pred(l)->_idx]);
+        for (l = 1; l < (int)b->num_preds(); l++) {
+          Block* block = cfg->get_block_for_node(b->pred(l));
+          worklist->push(block);
+        }
       }
     }
 
@@ -629,10 +633,9 @@
     // pred to this block.  Otherwise we have to grab a new OopFlow.
     OopFlow *flow = NULL;       // Flag for finding optimized flow
     Block *pred = (Block*)0xdeadbeef;
-    uint j;
     // Scan this block's preds to find a done predecessor
-    for( j=1; j<b->num_preds(); j++ ) {
-      Block *p = _cfg->_bbs[b->pred(j)->_idx];
+    for (uint j = 1; j < b->num_preds(); j++) {
+      Block* p = _cfg->get_block_for_node(b->pred(j));
       OopFlow *p_flow = flows[p->_pre_order];
       if( p_flow ) {            // Predecessor is done
         assert( p_flow->_b == p, "cross check" );
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -179,6 +179,9 @@
   product_pd(intx,  LoopUnrollLimit,                                        \
           "Unroll loop bodies with node count less than this")              \
                                                                             \
+  product(intx,  LoopMaxUnroll, 16,                                         \
+          "Maximum number of unrolls for main loop")                        \
+                                                                            \
   product(intx,  LoopUnrollMin, 4,                                          \
           "Minimum number of unroll loop bodies before checking progress"   \
           "of rounds of unroll,optimize,..")                                \
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -295,7 +295,7 @@
 
 
 bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
-  Block *bcon = _cfg._bbs[con->_idx];
+  Block* bcon = _cfg.get_block_for_node(con);
   uint cindex = bcon->find_node(con);
   Node *con_next = bcon->_nodes[cindex+1];
   if (con_next->in(0) != con || !con_next->is_MachProj()) {
@@ -306,7 +306,7 @@
   Node *kills = con_next->clone();
   kills->set_req(0, copy);
   b->_nodes.insert(idx, kills);
-  _cfg._bbs.map(kills->_idx, b);
+  _cfg.map_node_to_block(kills, b);
   new_lrg(kills, max_lrg_id);
   return true;
 }
@@ -962,8 +962,7 @@
         // AggressiveCoalesce.  This effectively pre-virtual-splits
         // around uncommon uses of common defs.
         const RegMask &rm = n->in_RegMask(k);
-        if( !after_aggressive &&
-          _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
+        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) {
           // Since we are BEFORE aggressive coalesce, leave the register
           // mask untrimmed by the call.  This encourages more coalescing.
           // Later, AFTER aggressive, this live range will have to spill
@@ -1709,16 +1708,15 @@
       // set control to _root and place it into Start block
       // (where top() node is placed).
       base->init_req(0, _cfg._root);
-      Block *startb = _cfg._bbs[C->top()->_idx];
+      Block *startb = _cfg.get_block_for_node(C->top());
       startb->_nodes.insert(startb->find_node(C->top()), base );
-      _cfg._bbs.map( base->_idx, startb );
+      _cfg.map_node_to_block(base, startb);
       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
     }
     if (_lrg_map.live_range_id(base) == 0) {
       new_lrg(base, maxlrg++);
     }
-    assert(base->in(0) == _cfg._root &&
-           _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
+    assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
     derived_base_map[derived->_idx] = base;
     return base;
   }
@@ -1754,12 +1752,12 @@
   base->as_Phi()->set_type(t);
 
   // Search the current block for an existing base-Phi
-  Block *b = _cfg._bbs[derived->_idx];
+  Block *b = _cfg.get_block_for_node(derived);
   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
     Node *phi = b->_nodes[i];
     if( !phi->is_Phi() ) {      // Found end of Phis with no match?
       b->_nodes.insert( i, base ); // Must insert created Phi here as base
-      _cfg._bbs.map( base->_idx, b );
+      _cfg.map_node_to_block(base, b);
       new_lrg(base,maxlrg++);
       break;
     }
@@ -1815,8 +1813,8 @@
       if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
         Node *phi = n->in(1);
         if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
-          Block *phi_block = _cfg._bbs[phi->_idx];
-          if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
+          Block *phi_block = _cfg.get_block_for_node(phi);
+          if (_cfg.get_block_for_node(phi_block->pred(2)) == b) {
             const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
             Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
             insert_proj( phi_block, 1, spill, maxlrg++ );
@@ -1870,7 +1868,7 @@
             if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
                  !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
                  (_lrg_map.live_range_id(base) > 0) && // not a constant
-                 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
+                 _cfg.get_block_for_node(base) != b) { // base not def'd in blk)
               // Base pointer is not currently live.  Since I stretched
               // the base pointer to here and it crosses basic-block
               // boundaries, the global live info is now incorrect.
@@ -1993,8 +1991,8 @@
   tty->print("\n");
 }
 
-void PhaseChaitin::dump( const Block * b ) const {
-  b->dump_head( &_cfg._bbs );
+void PhaseChaitin::dump(const Block *b) const {
+  b->dump_head(&_cfg);
 
   // For all instructions
   for( uint j = 0; j < b->_nodes.size(); j++ )
@@ -2299,7 +2297,7 @@
       if (_lrg_map.find_const(n) == lidx) {
         if (!dump_once++) {
           tty->cr();
-          b->dump_head( &_cfg._bbs );
+          b->dump_head(&_cfg);
         }
         dump(n);
         continue;
@@ -2314,7 +2312,7 @@
           if (_lrg_map.find_const(m) == lidx) {
             if (!dump_once++) {
               tty->cr();
-              b->dump_head(&_cfg._bbs);
+              b->dump_head(&_cfg);
             }
             dump(n);
           }
--- a/hotspot/src/share/vm/opto/coalesce.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/coalesce.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -52,7 +52,7 @@
     // Print a nice block header
     tty->print("B%d: ",b->_pre_order);
     for( j=1; j<b->num_preds(); j++ )
-      tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
+      tty->print("B%d ", _phc._cfg.get_block_for_node(b->pred(j))->_pre_order);
     tty->print("-> ");
     for( j=0; j<b->_num_succs; j++ )
       tty->print("B%d ",b->_succs[j]->_pre_order);
@@ -208,7 +208,7 @@
     copy->set_req(idx,tmp);
     // Save source in temp early, before source is killed
     b->_nodes.insert(kill_src_idx,tmp);
-    _phc._cfg._bbs.map( tmp->_idx, b );
+    _phc._cfg.map_node_to_block(tmp, b);
     last_use_idx++;
   }
 
@@ -286,7 +286,7 @@
           Node *m = n->in(j);
           uint src_name = _phc._lrg_map.find(m);
           if (src_name != phi_name) {
-            Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
+            Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
             Node *copy;
             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
             // Rematerialize constants instead of copying them
@@ -305,7 +305,7 @@
             }
             // Insert the copy in the use-def chain
             n->set_req(j, copy);
-            _phc._cfg._bbs.map( copy->_idx, pred );
+            _phc._cfg.map_node_to_block(copy, pred);
             // Extend ("register allocate") the names array for the copy.
             _phc._lrg_map.extend(copy->_idx, phi_name);
           } // End of if Phi names do not match
@@ -343,13 +343,13 @@
             n->set_req(idx, copy);
             // Extend ("register allocate") the names array for the copy.
             _phc._lrg_map.extend(copy->_idx, name);
-            _phc._cfg._bbs.map( copy->_idx, b );
+            _phc._cfg.map_node_to_block(copy, b);
           }
 
         } // End of is two-adr
 
         // Insert a copy at a debug use for a lrg which has high frequency
-        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
+        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
           // Walk the debug inputs to the node and check for lrg freq
           JVMState* jvms = n->jvms();
           uint debug_start = jvms ? jvms->debug_start() : 999999;
@@ -391,7 +391,7 @@
               uint max_lrg_id = _phc._lrg_map.max_lrg_id();
               _phc.new_lrg(copy, max_lrg_id);
               _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
-              _phc._cfg._bbs.map(copy->_idx, b);
+              _phc._cfg.map_node_to_block(copy, b);
               //tty->print_cr("Split a debug use in Aggressive Coalesce");
             }  // End of if high frequency use/def
           }  // End of for all debug inputs
@@ -437,7 +437,10 @@
     Block *bs = b->_succs[i];
     // Find index of 'b' in 'bs' predecessors
     uint j=1;
-    while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
+    while (_phc._cfg.get_block_for_node(bs->pred(j)) != b) {
+      j++;
+    }
+
     // Visit all the Phis in successor block
     for( uint k = 1; k<bs->_nodes.size(); k++ ) {
       Node *n = bs->_nodes[k];
@@ -510,9 +513,9 @@
   if( bindex < b->_fhrp_index ) b->_fhrp_index--;
 
   // Stretched lr1; add it to liveness of intermediate blocks
-  Block *b2 = _phc._cfg._bbs[src_copy->_idx];
+  Block *b2 = _phc._cfg.get_block_for_node(src_copy);
   while( b != b2 ) {
-    b = _phc._cfg._bbs[b->pred(1)->_idx];
+    b = _phc._cfg.get_block_for_node(b->pred(1));
     _phc._live->live(b)->insert(lr1);
   }
 }
@@ -532,7 +535,7 @@
     bindex2--;                  // Chain backwards 1 instruction
     while( bindex2 == 0 ) {     // At block start, find prior block
       assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
-      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
+      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
       bindex2 = b2->end_idx()-1;
     }
     // Get prior instruction
@@ -676,8 +679,8 @@
 
   if (UseFPUForSpilling && rm.is_AllStack() ) {
     // Don't coalesce when frequency difference is large
-    Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
-    Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
+    Block *dst_b = _phc._cfg.get_block_for_node(dst_copy);
+    Block *src_def_b = _phc._cfg.get_block_for_node(src_def);
     if (src_def_b->_freq > 10*dst_b->_freq )
       return false;
   }
@@ -690,7 +693,7 @@
   // Another early bail-out test is when we are double-coalescing and the
   // 2 copies are separated by some control flow.
   if( dst_copy != src_copy ) {
-    Block *src_b = _phc._cfg._bbs[src_copy->_idx];
+    Block *src_b = _phc._cfg.get_block_for_node(src_copy);
     Block *b2 = b;
     while( b2 != src_b ) {
       if( b2->num_preds() > 2 ){// Found merge-point
@@ -701,7 +704,7 @@
         //record_bias( _phc._lrgs, lr1, lr2 );
        return false;           // Too hard to find all interferences
       }
-      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
+      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
     }
   }
 
@@ -786,8 +789,9 @@
 // Conservative (but pessimistic) copy coalescing of a single block
 void PhaseConservativeCoalesce::coalesce( Block *b ) {
   // Bail out on infrequent blocks
-  if( b->is_uncommon(_phc._cfg._bbs) )
+  if (b->is_uncommon(&_phc._cfg)) {
     return;
+  }
   // Check this block for copies.
   for( uint i = 1; i<b->end_idx(); i++ ) {
     // Check for actual copies on inputs.  Coalesce a copy into its
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -2262,7 +2262,7 @@
       tty->print("%3.3x   ", pcs[n->_idx]);
     else
       tty->print("      ");
-    b->dump_head( &_cfg->_bbs );
+    b->dump_head(_cfg);
     if (b->is_connector()) {
       tty->print_cr("        # Empty connector block");
     } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
@@ -3525,7 +3525,7 @@
 }
 
 Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
-  Block* b = Compile::current()->cfg()->_bbs[n->_idx];
+  Block* b = Compile::current()->cfg()->get_block_for_node(n);
   Constant con(type, value, b->_freq);
   add(con);
   return con;
--- a/hotspot/src/share/vm/opto/domgraph.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/domgraph.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -105,8 +105,8 @@
 
     // Step 2:
     Node *whead = w->_block->head();
-    for( uint j=1; j < whead->req(); j++ ) {
-      Block *b = _bbs[whead->in(j)->_idx];
+    for (uint j = 1; j < whead->req(); j++) {
+      Block* b = get_block_for_node(whead->in(j));
       Tarjan *vx = &tarjan[b->_pre_order];
       Tarjan *u = vx->EVAL();
       if( u->_semi < w->_semi )
--- a/hotspot/src/share/vm/opto/gcm.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -66,7 +66,7 @@
 // are in b also.
 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
   // Set basic block of n, Add n to b,
-  _bbs.map(n->_idx, b);
+  map_node_to_block(n, b);
   b->add_inst(n);
 
   // After Matching, nearly any old Node may have projections trailing it.
@@ -75,11 +75,12 @@
   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
     Node*  use  = n->fast_out(i);
     if (use->is_Proj()) {
-      Block* buse = _bbs[use->_idx];
+      Block* buse = get_block_for_node(use);
       if (buse != b) {              // In wrong block?
-        if (buse != NULL)
+        if (buse != NULL) {
           buse->find_remove(use);   // Remove from wrong block
-        _bbs.map(use->_idx, b);     // Re-insert in this block
+        }
+        map_node_to_block(use, b);
         b->add_inst(use);
       }
     }
@@ -97,7 +98,7 @@
   if (p != NULL && p != n) {    // Control from a block projection?
     assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
     // Find trailing Region
-    Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
+    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
     uint j = 0;
     if (pb->_num_succs != 1) {  // More than 1 successor?
       // Search for successor
@@ -127,14 +128,15 @@
   while ( spstack.is_nonempty() ) {
     Node *n = spstack.pop();
     if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
-      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
+      if( n->pinned() && !has_block(n)) {  // Pinned?  Nail it down!
         assert( n->in(0), "pinned Node must have Control" );
         // Before setting block replace block_proj control edge
         replace_block_proj_ctrl(n);
         Node *input = n->in(0);
-        while( !input->is_block_start() )
+        while (!input->is_block_start()) {
           input = input->in(0);
-        Block *b = _bbs[input->_idx];  // Basic block of controlling input
+        }
+        Block *b = get_block_for_node(input); // Basic block of controlling input
         schedule_node_into_block(n, b);
       }
       for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
@@ -149,7 +151,7 @@
 // Assert that new input b2 is dominated by all previous inputs.
 // Check this by seeing that it is dominated by b1, the deepest
 // input observed until b2.
-static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
+static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
   if (b1 == NULL)  return;
   assert(b1->_dom_depth < b2->_dom_depth, "sanity");
   Block* tmp = b2;
@@ -162,7 +164,7 @@
     for (uint j=0; j<n->len(); j++) { // For all inputs
       Node* inn = n->in(j); // Get input
       if (inn == NULL)  continue;  // Ignore NULL, missing inputs
-      Block* inb = bbs[inn->_idx];
+      Block* inb = cfg->get_block_for_node(inn);
       tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                  inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
       inn->dump();
@@ -174,20 +176,20 @@
 }
 #endif
 
-static Block* find_deepest_input(Node* n, Block_Array &bbs) {
+static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
   // Find the last input dominated by all other inputs.
   Block* deepb           = NULL;        // Deepest block so far
   int    deepb_dom_depth = 0;
   for (uint k = 0; k < n->len(); k++) { // For all inputs
     Node* inn = n->in(k);               // Get input
     if (inn == NULL)  continue;         // Ignore NULL, missing inputs
-    Block* inb = bbs[inn->_idx];
+    Block* inb = cfg->get_block_for_node(inn);
     assert(inb != NULL, "must already have scheduled this input");
     if (deepb_dom_depth < (int) inb->_dom_depth) {
       // The new inb must be dominated by the previous deepb.
       // The various inputs must be linearly ordered in the dom
       // tree, or else there will not be a unique deepest block.
-      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
+      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
       deepb = inb;                      // Save deepest block
       deepb_dom_depth = deepb->_dom_depth;
     }
@@ -243,7 +245,7 @@
         ++i;
         if (in == NULL) continue;    // Ignore NULL, missing inputs
         int is_visited = visited.test_set(in->_idx);
-        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
+        if (!has_block(in)) { // Missing block selection?
           if (is_visited) {
             // assert( !visited.test(in->_idx), "did not schedule early" );
             return false;
@@ -265,9 +267,9 @@
         // any projections which depend on them.
         if (!n->pinned()) {
           // Set earliest legal block.
-          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
+          map_node_to_block(n, find_deepest_input(n, this));
         } else {
-          assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
+          assert(get_block_for_node(n) == get_block_for_node(n->in(0)), "Pinned Node should be at the same block as its control edge");
         }
 
         if (nstack.is_empty()) {
@@ -313,8 +315,8 @@
 // The definition must dominate the use, so move the LCA upward in the
 // dominator tree to dominate the use.  If the use is a phi, adjust
 // the LCA only with the phi input paths which actually use this def.
-static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
-  Block* buse = bbs[use->_idx];
+static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
+  Block* buse = cfg->get_block_for_node(use);
   if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
   if (!use->is_Phi())  return buse->dom_lca(LCA);
   uint pmax = use->req();       // Number of Phi inputs
@@ -329,7 +331,7 @@
   // more than once.
   for (uint j=1; j<pmax; j++) { // For all inputs
     if (use->in(j) == def) {    // Found matching input?
-      Block* pred = bbs[buse->pred(j)->_idx];
+      Block* pred = cfg->get_block_for_node(buse->pred(j));
       LCA = pred->dom_lca(LCA);
     }
   }
@@ -342,8 +344,7 @@
 // which are marked with the given index.  Return the LCA (in the dom tree)
 // of all marked blocks.  If there are none marked, return the original
 // LCA.
-static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
-                                    Block* early, Block_Array &bbs) {
+static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
   Block_List worklist;
   worklist.push(LCA);
   while (worklist.size() > 0) {
@@ -366,7 +367,7 @@
     } else {
       // Keep searching through this block's predecessors.
       for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
-        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
+        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
         worklist.push(mid_parent);
       }
     }
@@ -384,7 +385,7 @@
 // be earlier (at a shallower dom_depth) than the true schedule_early
 // point of the node. We compute this earlier block as a more permissive
 // site for anti-dependency insertion, but only if subsume_loads is enabled.
-static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
+static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
   Node* base;
   Node* index;
   Node* store = load->in(MemNode::Memory);
@@ -412,12 +413,12 @@
     Block* deepb           = NULL;        // Deepest block so far
     int    deepb_dom_depth = 0;
     for (int i = 0; i < mem_inputs_length; i++) {
-      Block* inb = bbs[mem_inputs[i]->_idx];
+      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
       if (deepb_dom_depth < (int) inb->_dom_depth) {
         // The new inb must be dominated by the previous deepb.
         // The various inputs must be linearly ordered in the dom
         // tree, or else there will not be a unique deepest block.
-        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
+        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
         deepb = inb;                      // Save deepest block
         deepb_dom_depth = deepb->_dom_depth;
       }
@@ -488,14 +489,14 @@
   // and other inputs are first available.  (Computed by schedule_early.)
   // For normal loads, 'early' is the shallowest place (dom graph wise)
   // to look for anti-deps between this load and any store.
-  Block* early = _bbs[load_index];
+  Block* early = get_block_for_node(load);
 
   // If we are subsuming loads, compute an "early" block that only considers
   // memory or address inputs. This block may be different than the
   // schedule_early block in that it could be at an even shallower depth in the
   // dominator tree, and allow for a broader discovery of anti-dependences.
   if (C->subsume_loads()) {
-    early = memory_early_block(load, early, _bbs);
+    early = memory_early_block(load, early, this);
   }
 
   ResourceArea *area = Thread::current()->resource_area();
@@ -619,7 +620,7 @@
     // or else observe that 'store' is all the way up in the
     // earliest legal block for 'load'.  In the latter case,
     // immediately insert an anti-dependence edge.
-    Block* store_block = _bbs[store->_idx];
+    Block* store_block = get_block_for_node(store);
     assert(store_block != NULL, "unused killing projections skipped above");
 
     if (store->is_Phi()) {
@@ -637,7 +638,7 @@
       for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
         if (store->in(j) == mem) {   // Found matching input?
           DEBUG_ONLY(found_match = true);
-          Block* pred_block = _bbs[store_block->pred(j)->_idx];
+          Block* pred_block = get_block_for_node(store_block->pred(j));
           if (pred_block != early) {
             // If any predecessor of the Phi matches the load's "early block",
             // we do not need a precedence edge between the Phi and 'load'
@@ -711,7 +712,7 @@
   // preventing the load from sinking past any block containing
   // a store that may invalidate the memory state required by 'load'.
   if (must_raise_LCA)
-    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
+    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
   if (LCA == early)  return LCA;
 
   // Insert anti-dependence edges from 'load' to each store
@@ -720,7 +721,7 @@
   if (LCA->raise_LCA_mark() == load_index) {
     while (non_early_stores.size() > 0) {
       Node* store = non_early_stores.pop();
-      Block* store_block = _bbs[store->_idx];
+      Block* store_block = get_block_for_node(store);
       if (store_block == LCA) {
         // add anti_dependence from store to load in its own block
         assert(store != load->in(0), "dependence cycle found");
@@ -754,7 +755,7 @@
 
 public:
   // Constructor for the iterator
-  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);
+  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
 
   // Postincrement operator to iterate over the nodes
   Node *next();
@@ -762,12 +763,12 @@
 private:
   VectorSet   &_visited;
   Node_List   &_stack;
-  Block_Array &_bbs;
+  PhaseCFG &_cfg;
 };
 
 // Constructor for the Node_Backward_Iterator
-Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
-  : _visited(visited), _stack(stack), _bbs(bbs) {
+Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
+  : _visited(visited), _stack(stack), _cfg(cfg) {
   // The stack should contain exactly the root
   stack.clear();
   stack.push(root);
@@ -797,8 +798,8 @@
     _visited.set(self->_idx);
 
     // Now schedule all uses as late as possible.
-    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
-    uint src_rpo = _bbs[src]->_rpo;
+    const Node* src = self->is_Proj() ? self->in(0) : self;
+    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
 
     // Schedule all nodes in a post-order visit
     Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
@@ -814,7 +815,7 @@
 
       // do not traverse backward control edges
       Node *use = n->is_Proj() ? n->in(0) : n;
-      uint use_rpo = _bbs[use->_idx]->_rpo;
+      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
 
       if ( use_rpo < src_rpo )
         continue;
@@ -852,7 +853,7 @@
     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
 #endif
 
-  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
+  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
   Node *n;
 
   // Walk over all the nodes from last to first
@@ -883,7 +884,7 @@
 
   uint nlen = n->len();
   uint use_latency = _node_latency->at_grow(n->_idx);
-  uint use_pre_order = _bbs[n->_idx]->_pre_order;
+  uint use_pre_order = get_block_for_node(n)->_pre_order;
 
   for ( uint j=0; j<nlen; j++ ) {
     Node *def = n->in(j);
@@ -903,7 +904,7 @@
 #endif
 
     // If the defining block is not known, assume it is ok
-    Block *def_block = _bbs[def->_idx];
+    Block *def_block = get_block_for_node(def);
     uint def_pre_order = def_block ? def_block->_pre_order : 0;
 
     if ( (use_pre_order <  def_pre_order) ||
@@ -931,10 +932,11 @@
 // Compute the latency of a specific use
 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
   // If self-reference, return no latency
-  if (use == n || use->is_Root())
+  if (use == n || use->is_Root()) {
     return 0;
+  }
 
-  uint def_pre_order = _bbs[def->_idx]->_pre_order;
+  uint def_pre_order = get_block_for_node(def)->_pre_order;
   uint latency = 0;
 
   // If the use is not a projection, then it is simple...
@@ -946,7 +948,7 @@
     }
 #endif
 
-    uint use_pre_order = _bbs[use->_idx]->_pre_order;
+    uint use_pre_order = get_block_for_node(use)->_pre_order;
 
     if (use_pre_order < def_pre_order)
       return 0;
@@ -1018,7 +1020,7 @@
   uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
   uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   bool in_latency    = (target <= start_latency);
-  const Block* root_block = _bbs[_root->_idx];
+  const Block* root_block = get_block_for_node(_root);
 
   // Turn off latency scheduling if scheduling is just plain off
   if (!C->do_scheduling())
@@ -1126,12 +1128,12 @@
     tty->print("\n#---- schedule_late ----\n");
 #endif
 
-  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
+  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
   Node *self;
 
   // Walk over all the nodes from last to first
   while (self = iter.next()) {
-    Block* early = _bbs[self->_idx];   // Earliest legal placement
+    Block* early = get_block_for_node(self); // Earliest legal placement
 
     if (self->is_top()) {
       // Top node goes in bb #2 with other constants.
@@ -1179,7 +1181,7 @@
       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
         // For all uses, find LCA
         Node* use = self->fast_out(i);
-        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
+        LCA = raise_LCA_above_use(LCA, use, self, this);
       }
     }  // (Hide defs of imax, i from rest of block.)
 
@@ -1187,7 +1189,7 @@
     // requirement for correctness but it reduces useless
     // interference between temps and other nodes.
     if (mach != NULL && mach->is_MachTemp()) {
-      _bbs.map(self->_idx, LCA);
+      map_node_to_block(self, LCA);
       LCA->add_inst(self);
       continue;
     }
@@ -1262,10 +1264,10 @@
   }
 #endif
 
-  // Initialize the bbs.map for things on the proj_list
-  uint i;
-  for( i=0; i < proj_list.size(); i++ )
-    _bbs.map(proj_list[i]->_idx, NULL);
+  // Initialize the node to block mapping for things on the proj_list
+  for (uint i = 0; i < proj_list.size(); i++) {
+    unmap_node_from_block(proj_list[i]);
+  }
 
   // Set the basic block for Nodes pinned into blocks
   Arena *a = Thread::current()->resource_area();
@@ -1333,7 +1335,7 @@
     for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
       Node *proj = matcher._null_check_tests[i  ];
       Node *val  = matcher._null_check_tests[i+1];
-      _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
+      get_block_for_node(proj)->implicit_null_check(this, proj, val, allowed_reasons);
       // The implicit_null_check will only perform the transformation
       // if the null branch is truly uncommon, *and* it leads to an
       // uncommon trap.  Combined with the too_many_traps guards
@@ -1353,7 +1355,7 @@
   uint max_idx = C->unique();
   GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
   visited.Clear();
-  for (i = 0; i < _num_blocks; i++) {
+  for (uint i = 0; i < _num_blocks; i++) {
     if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
         C->record_method_not_compilable("local schedule failed");
@@ -1364,8 +1366,9 @@
 
   // If we inserted any instructions between a Call and his CatchNode,
   // clone the instructions on all paths below the Catch.
-  for( i=0; i < _num_blocks; i++ )
-    _blocks[i]->call_catch_cleanup(_bbs, C);
+  for (uint i = 0; i < _num_blocks; i++) {
+    _blocks[i]->call_catch_cleanup(this, C);
+  }
 
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
@@ -1392,7 +1395,7 @@
     Block_List worklist;
     Block* root_blk = _blocks[0];
     for (uint i = 1; i < root_blk->num_preds(); i++) {
-      Block *pb = _bbs[root_blk->pred(i)->_idx];
+      Block *pb = get_block_for_node(root_blk->pred(i));
       if (pb->has_uncommon_code()) {
         worklist.push(pb);
       }
@@ -1401,7 +1404,7 @@
       Block* uct = worklist.pop();
       if (uct == _broot) continue;
       for (uint i = 1; i < uct->num_preds(); i++) {
-        Block *pb = _bbs[uct->pred(i)->_idx];
+        Block *pb = get_block_for_node(uct->pred(i));
         if (pb->_num_succs == 1) {
           worklist.push(pb);
         } else if (pb->num_fall_throughs() == 2) {
@@ -1430,7 +1433,7 @@
     Block_List worklist;
     Block* root_blk = _blocks[0];
     for (uint i = 1; i < root_blk->num_preds(); i++) {
-      Block *pb = _bbs[root_blk->pred(i)->_idx];
+      Block *pb = get_block_for_node(root_blk->pred(i));
       if (pb->has_uncommon_code()) {
         worklist.push(pb);
       }
@@ -1439,7 +1442,7 @@
       Block* uct = worklist.pop();
       uct->_freq = PROB_MIN;
       for (uint i = 1; i < uct->num_preds(); i++) {
-        Block *pb = _bbs[uct->pred(i)->_idx];
+        Block *pb = get_block_for_node(uct->pred(i));
         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
           worklist.push(pb);
         }
@@ -1499,7 +1502,7 @@
       Block* loop_head = b;
       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
-      Block* tail = _bbs[tail_n->_idx];
+      Block* tail = get_block_for_node(tail_n);
 
       // Defensively filter out Loop nodes for non-single-entry loops.
       // For all reasonable loops, the head occurs before the tail in RPO.
@@ -1514,13 +1517,13 @@
         loop_head->_loop = nloop;
         // Add to nloop so push_pred() will skip over inner loops
         nloop->add_member(loop_head);
-        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
+        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
 
         while (worklist.size() > 0) {
           Block* member = worklist.pop();
           if (member != loop_head) {
             for (uint j = 1; j < member->num_preds(); j++) {
-              nloop->push_pred(member, j, worklist, _bbs);
+              nloop->push_pred(member, j, worklist, this);
             }
           }
         }
@@ -1557,9 +1560,9 @@
 }
 
 //------------------------------push_pred--------------------------------------
-void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
+void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
   Node* pred_n = blk->pred(i);
-  Block* pred = node_to_blk[pred_n->_idx];
+  Block* pred = cfg->get_block_for_node(pred_n);
   CFGLoop *pred_loop = pred->_loop;
   if (pred_loop == NULL) {
     // Filter out blocks for non-single-entry loops.
@@ -1580,7 +1583,7 @@
       Block* pred_head = pred_loop->head();
       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
       assert(pred_head != head(), "loop head in only one loop");
-      push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
+      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
     } else {
       assert(pred_loop->_parent == this && _parent == NULL, "just checking");
     }
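
Node_Backward_Iterator now carries a PhaseCFG& instead of a Block_Array&, so
RPO lookups route through the accessor. A condensed sketch of the walk as the
two call sites above use it, with the iterator's projection handling inlined
for illustration:

    Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
    Node* self;
    while ((self = iter.next()) != NULL) {
      // A projection is scheduled with its parent, so RPO comes from in(0).
      const Node* src = self->is_Proj() ? self->in(0) : self;
      uint rpo = get_block_for_node(src)->_rpo;
    }
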
--- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -413,9 +413,9 @@
     print_prop("debug_idx", node->_debug_idx);
 #endif
 
-    if(C->cfg() != NULL) {
-      Block *block = C->cfg()->_bbs[node->_idx];
-      if(block == NULL) {
+    if (C->cfg() != NULL) {
+      Block* block = C->cfg()->get_block_for_node(node);
+      if (block == NULL) {
         print_prop("block", C->cfg()->_blocks[0]->_pre_order);
       } else {
         print_prop("block", block->_pre_order);
--- a/hotspot/src/share/vm/opto/ifg.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/ifg.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -565,7 +565,7 @@
               lrgs(r)._def = 0;
             }
             n->disconnect_inputs(NULL, C);
-            _cfg._bbs.map(n->_idx,NULL);
+            _cfg.unmap_node_from_block(n);
             n->replace_by(C->top());
             // Since yanking a Node from block, high pressure moves up one
             hrp_index[0]--;
@@ -607,7 +607,7 @@
           if( n->is_SpillCopy()
               && lrgs(r).is_singledef()        // MultiDef live range can still split
               && n->outcnt() == 1              // and use must be in this block
-              && _cfg._bbs[n->unique_out()->_idx] == b ) {
+              && _cfg.get_block_for_node(n->unique_out()) == b ) {
             // All single-use MachSpillCopy(s) that immediately precede their
             // use must color early.  If a longer live range steals their
             // color, the spill copy will split and may push another spill copy
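
Clearing a node's block is now a named operation rather than a hand-rolled
map-to-NULL. Restating the removal sequence from the hunk above as a sketch
(`n` is the node being yanked, `C` the Compile object):

    n->disconnect_inputs(NULL, C);   // sever its use-def edges
    _cfg.unmap_node_from_block(n);   // forget which block owned it
    n->replace_by(C->top());         // splice top() over any remaining uses
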
--- a/hotspot/src/share/vm/opto/lcm.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -237,7 +237,7 @@
     }
 
     // Check ctrl input to see if the null-check dominates the memory op
-    Block *cb = cfg->_bbs[mach->_idx];
+    Block *cb = cfg->get_block_for_node(mach);
     cb = cb->_idom;             // Always hoist at least 1 block
     if( !was_store ) {          // Stores can be hoisted only one block
       while( cb->_dom_depth > (_dom_depth + 1))
@@ -262,7 +262,7 @@
         if( is_decoden ) continue;
       }
       // Block of memory-op input
-      Block *inb = cfg->_bbs[mach->in(j)->_idx];
+      Block *inb = cfg->get_block_for_node(mach->in(j));
       Block *b = this;          // Start from nul check
       while( b != inb && b->_dom_depth > inb->_dom_depth )
         b = b->_idom;           // search upwards for input
@@ -272,7 +272,7 @@
     }
     if( j > 0 )
       continue;
-    Block *mb = cfg->_bbs[mach->_idx];
+    Block *mb = cfg->get_block_for_node(mach);
     // Hoisting stores requires more checks for the anti-dependence case.
     // Give up hoisting if we have to move the store past any load.
     if( was_store ) {
@@ -291,7 +291,7 @@
           break;                // Found anti-dependent load
         // Make sure control does not do a merge (would have to check allpaths)
         if( b->num_preds() != 2 ) break;
-        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
+        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
       }
       if( b != this ) continue;
     }
@@ -303,15 +303,15 @@
 
     // Found a candidate!  Pick one with least dom depth - the highest
     // in the dom tree should be closest to the null check.
-    if( !best ||
-        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
+    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
       best = mach;
       bidx = vidx;
-
     }
   }
   // No candidate!
-  if( !best ) return;
+  if (best == NULL) {
+    return;
+  }
 
   // ---- Found an implicit null check
   extern int implicit_null_checks;
@@ -319,29 +319,29 @@
 
   if( is_decoden ) {
     // Check if we need to hoist decodeHeapOop_not_null first.
-    Block *valb = cfg->_bbs[val->_idx];
+    Block *valb = cfg->get_block_for_node(val);
     if( this != valb && this->_dom_depth < valb->_dom_depth ) {
       // Hoist it up to the end of the test block.
       valb->find_remove(val);
       this->add_inst(val);
-      cfg->_bbs.map(val->_idx,this);
+      cfg->map_node_to_block(val, this);
       // DecodeN on x86 may kill flags. Check for flag-killing projections
       // that also need to be hoisted.
       for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
         Node* n = val->fast_out(j);
         if( n->is_MachProj() ) {
-          cfg->_bbs[n->_idx]->find_remove(n);
+          cfg->get_block_for_node(n)->find_remove(n);
           this->add_inst(n);
-          cfg->_bbs.map(n->_idx,this);
+          cfg->map_node_to_block(n, this);
         }
       }
     }
   }
   // Hoist the memory candidate up to the end of the test block.
-  Block *old_block = cfg->_bbs[best->_idx];
+  Block *old_block = cfg->get_block_for_node(best);
   old_block->find_remove(best);
   add_inst(best);
-  cfg->_bbs.map(best->_idx,this);
+  cfg->map_node_to_block(best, this);
 
   // Move the control dependence
   if (best->in(0) && best->in(0) == old_block->_nodes[0])
@@ -352,9 +352,9 @@
   for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
     Node* n = best->fast_out(j);
     if( n->is_MachProj() ) {
-      cfg->_bbs[n->_idx]->find_remove(n);
+      cfg->get_block_for_node(n)->find_remove(n);
       add_inst(n);
-      cfg->_bbs.map(n->_idx,this);
+      cfg->map_node_to_block(n, this);
     }
   }
 
@@ -385,7 +385,7 @@
   Node *old_tst = proj->in(0);
   MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
   _nodes.map(end_idx(),nul_chk);
-  cfg->_bbs.map(nul_chk->_idx,this);
+  cfg->map_node_to_block(nul_chk, this);
   // Redirect users of old_test to nul_chk
   for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
     old_tst->last_out(i2)->set_req(0, nul_chk);
@@ -468,7 +468,7 @@
         Node* use = n->fast_out(j);
 
         // The use is a conditional branch, make them adjacent
-        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
+        if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
           found_machif = true;
           break;
         }
@@ -529,13 +529,14 @@
 
 
 //------------------------------set_next_call----------------------------------
-void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
+void Block::set_next_call(Node* n, VectorSet& next_call, PhaseCFG* cfg) {
   if( next_call.test_set(n->_idx) ) return;
   for( uint i=0; i<n->len(); i++ ) {
     Node *m = n->in(i);
     if( !m ) continue;  // must see all nodes in block that precede call
-    if( bbs[m->_idx] == this )
-      set_next_call( m, next_call, bbs );
+    if (cfg->get_block_for_node(m) == this) {
+      set_next_call(m, next_call, cfg);
+    }
   }
 }
 
@@ -545,12 +546,12 @@
 // next subroutine call get priority - basically it moves things NOT needed
 // for the next call till after the call.  This prevents me from trying to
 // carry lots of stuff live across a call.
-void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
+void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
   // Find the next control-defining Node in this block
   Node* call = NULL;
   for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
     Node* m = this_call->fast_out(i);
-    if( bbs[m->_idx] == this && // Local-block user
+    if (cfg->get_block_for_node(m) == this && // Local-block user
         m != this_call &&       // Not self-start node
         m->is_MachCall() )
       call = m;
@@ -558,7 +559,7 @@
   }
   if (call == NULL)  return;    // No next call (e.g., block end is near)
   // Set next-call for all inputs to this call
-  set_next_call(call, next_call, bbs);
+  set_next_call(call, next_call, cfg);
 }
 
 //------------------------------add_call_kills-------------------------------------
@@ -578,7 +579,7 @@
 
 
 //------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint Block::sched_call(Matcher& matcher, PhaseCFG* cfg, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
   RegMask regs;
 
   // Schedule all the users of the call right now.  All the users are
@@ -597,12 +598,14 @@
     // Check for scheduling the next control-definer
     if( n->bottom_type() == Type::CONTROL )
       // Warm up next pile of heuristic bits
-      needed_for_next_call(n, next_call, bbs);
+      needed_for_next_call(n, next_call, cfg);
 
     // Children of projections are now all ready
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j); // Get user
-      if( bbs[m->_idx] != this ) continue;
+      if (cfg->get_block_for_node(m) != this) {
+        continue;
+      }
       if( m->is_Phi() ) continue;
       int m_cnt = ready_cnt.at(m->_idx)-1;
       ready_cnt.at_put(m->_idx, m_cnt);
@@ -620,7 +623,7 @@
   uint r_cnt = mcall->tf()->range()->cnt();
   int op = mcall->ideal_Opcode();
   MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
-  bbs.map(proj->_idx,this);
+  cfg->map_node_to_block(proj, this);
   _nodes.insert(node_cnt++, proj);
 
   // Select the right register save policy.
@@ -708,7 +711,7 @@
       uint local = 0;
       for( uint j=0; j<cnt; j++ ) {
         Node *m = n->in(j);
-        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
+        if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
           local++;              // One more block-local input
       }
       ready_cnt.at_put(n->_idx, local); // Count em up
@@ -720,7 +723,7 @@
           for (uint prec = n->req(); prec < n->len(); prec++) {
             Node* oop_store = n->in(prec);
             if (oop_store != NULL) {
-              assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+              assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
             }
           }
         }
@@ -753,7 +756,7 @@
     Node *n = _nodes[i3];       // Get pre-scheduled
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j);
-      if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
+      if (cfg->get_block_for_node(m) == this) { // Local-block user
         int m_cnt = ready_cnt.at(m->_idx)-1;
         ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
       }
@@ -786,7 +789,7 @@
   }
 
   // Warm up the 'next_call' heuristic bits
-  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);
+  needed_for_next_call(_nodes[0], next_call, cfg);
 
 #ifndef PRODUCT
     if (cfg->trace_opto_pipelining()) {
@@ -837,7 +840,7 @@
 #endif
     if( n->is_MachCall() ) {
       MachCallNode *mcall = n->as_MachCall();
-      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
+      phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
       continue;
     }
 
@@ -847,7 +850,7 @@
       regs.OR(n->out_RegMask());
 
       MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
-      cfg->_bbs.map(proj->_idx,this);
+      cfg->map_node_to_block(proj, this);
       _nodes.insert(phi_cnt++, proj);
 
       add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
@@ -856,7 +859,9 @@
     // Children are now all ready
     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
       Node* m = n->fast_out(i5); // Get user
-      if( cfg->_bbs[m->_idx] != this ) continue;
+      if (cfg->get_block_for_node(m) != this) {
+        continue;
+      }
       if( m->is_Phi() ) continue;
       if (m->_idx >= max_idx) { // new node, skip it
         assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
@@ -914,7 +919,7 @@
 }
 
 //------------------------------catch_cleanup_find_cloned_def------------------
-static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
+static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
   assert( use_blk != def_blk, "Inter-block cleanup only");
 
   // The use is some block below the Catch.  Find and return the clone of the def
@@ -940,7 +945,8 @@
     // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
     Node_Array inputs = new Node_List(Thread::current()->resource_area());
     for(uint k = 1; k < use_blk->num_preds(); k++) {
-      inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
+      Block* block = cfg->get_block_for_node(use_blk->pred(k));
+      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
     }
 
     // Check to see if the use_blk already has an identical phi inserted.
@@ -962,7 +968,7 @@
     if (fixup == NULL) {
       Node *new_phi = PhiNode::make(use_blk->head(), def);
       use_blk->_nodes.insert(1, new_phi);
-      bbs.map(new_phi->_idx, use_blk);
+      cfg->map_node_to_block(new_phi, use_blk);
       for (uint k = 1; k < use_blk->num_preds(); k++) {
         new_phi->set_req(k, inputs[k]);
       }
@@ -1002,17 +1008,17 @@
 //------------------------------catch_cleanup_inter_block---------------------
 // Fix all input edges in use that reference "def".  The use is in a different
 // block than the def.
-static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
+static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
   if( !use_blk ) return;        // Can happen if the use is a precedence edge
 
-  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
+  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
   catch_cleanup_fix_all_inputs(use, def, new_def);
 }
 
 //------------------------------call_catch_cleanup-----------------------------
 // If we inserted any instructions between a Call and its CatchNode,
 // clone the instructions on all paths below the Catch.
-void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
+void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
 
   // End of region to clone
   uint end = end_idx();
@@ -1037,7 +1043,7 @@
       // since clones dominate on each path.
       Node *clone = _nodes[j-1]->clone();
       sb->_nodes.insert( 1, clone );
-      bbs.map(clone->_idx,sb);
+      cfg->map_node_to_block(clone, sb);
     }
   }
 
@@ -1054,18 +1060,19 @@
     uint max = out->size();
     for (uint j = 0; j < max; j++) {// For all users
       Node *use = out->pop();
-      Block *buse = bbs[use->_idx];
+      Block *buse = cfg->get_block_for_node(use);
       if( use->is_Phi() ) {
         for( uint k = 1; k < use->req(); k++ )
           if( use->in(k) == n ) {
-            Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
+            Block* block = cfg->get_block_for_node(buse->pred(k));
+            Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
             use->set_req(k, fixup);
           }
       } else {
         if (this == buse) {
           catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
         } else {
-          catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
+          catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
         }
       }
     } // End for all users
--- a/hotspot/src/share/vm/opto/live.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/live.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -101,7 +101,7 @@
       for( uint k=1; k<cnt; k++ ) {
         Node *nk = n->in(k);
         uint nkidx = nk->_idx;
-        if( _cfg._bbs[nkidx] != b ) {
+        if (_cfg.get_block_for_node(nk) != b) {
           uint u = _names[nkidx];
           use->insert( u );
           DEBUG_ONLY(def_outside->insert( u );)
@@ -121,7 +121,7 @@
 
     // Push these live-in things to predecessors
     for( uint l=1; l<b->num_preds(); l++ ) {
-      Block *p = _cfg._bbs[b->pred(l)->_idx];
+      Block *p = _cfg.get_block_for_node(b->pred(l));
       add_liveout( p, use, first_pass );
 
       // PhiNode uses go in the live-out set of prior blocks.
@@ -142,8 +142,10 @@
       assert( delta->count(), "missing delta set" );
 
       // Add new-live-in to predecessors live-out sets
-      for( uint l=1; l<b->num_preds(); l++ )
-        add_liveout( _cfg._bbs[b->pred(l)->_idx], delta, first_pass );
+      for (uint l = 1; l < b->num_preds(); l++) {
+        Block* block = _cfg.get_block_for_node(b->pred(l));
+        add_liveout(block, delta, first_pass);
+      }
 
       freeset(b);
     } // End of while-worklist-not-empty
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -624,8 +624,6 @@
 }
 
 
-#define MAX_UNROLL 16 // maximum number of unrolls for main loop
-
 //------------------------------policy_unroll----------------------------------
 // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
 // the loop is a CountedLoop and the body is small enough.
@@ -642,7 +640,7 @@
   if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
 
   int future_unroll_ct = cl->unrolled_count() * 2;
-  if (future_unroll_ct > MAX_UNROLL) return false;
+  if (future_unroll_ct > LoopMaxUnroll) return false;
 
   // Check for initial stride being a small enough constant
   if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
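
The MAX_UNROLL define is gone; the unroll heuristic now consults LoopMaxUnroll. The flag declaration is not shown in this changeset; a hedged sketch of what it presumably looks like in c2_globals.hpp, keeping the removed default of 16 so behavior is unchanged unless the flag is set explicitly (the exact wording and placement are assumptions):

    product(intx, LoopMaxUnroll, 16,                                          \
            "Maximum number of unrolls for main loop")                        \
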
--- a/hotspot/src/share/vm/opto/node.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -42,7 +42,6 @@
 class AllocateArrayNode;
 class AllocateNode;
 class Block;
-class Block_Array;
 class BoolNode;
 class BoxLockNode;
 class CMoveNode;
--- a/hotspot/src/share/vm/opto/output.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -68,7 +68,6 @@
     return;
   }
   // Make sure I can find the Start Node
-  Block_Array& bbs = _cfg->_bbs;
   Block *entry = _cfg->_blocks[1];
   Block *broot = _cfg->_broot;
 
@@ -77,8 +76,8 @@
   // Replace StartNode with prolog
   MachPrologNode *prolog = new (this) MachPrologNode();
   entry->_nodes.map( 0, prolog );
-  bbs.map( prolog->_idx, entry );
-  bbs.map( start->_idx, NULL ); // start is no longer in any block
+  _cfg->map_node_to_block(prolog, entry);
+  _cfg->unmap_node_from_block(start); // start is no longer in any block
 
   // Virtual methods need an unverified entry point
 
@@ -117,8 +116,7 @@
       if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
         MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
         b->add_inst( epilog );
-        bbs.map(epilog->_idx, b);
-        //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
+        _cfg->map_node_to_block(epilog, b);
       }
     }
   }
@@ -252,7 +250,7 @@
         if (insert) {
           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
           b->_nodes.insert( j, zap );
-          _cfg->_bbs.map( zap->_idx, b );
+          _cfg->map_node_to_block(zap, b);
           ++j;
         }
       }
@@ -1234,7 +1232,7 @@
 #ifdef ASSERT
     if (!b->is_connector()) {
       stringStream st;
-      b->dump_head(&_cfg->_bbs, &st);
+      b->dump_head(_cfg, &st);
       MacroAssembler(cb).block_comment(st.as_string());
     }
     jmp_target[i] = 0;
@@ -1310,7 +1308,7 @@
           MachNode *nop = new (this) MachNopNode(nops_cnt);
           b->_nodes.insert(j++, nop);
           last_inst++;
-          _cfg->_bbs.map( nop->_idx, b );
+          _cfg->map_node_to_block(nop, b);
           nop->emit(*cb, _regalloc);
           cb->flush_bundle(true);
           current_offset = cb->insts_size();
@@ -1395,7 +1393,7 @@
               if (needs_padding && replacement->avoid_back_to_back()) {
                 MachNode *nop = new (this) MachNopNode();
                 b->_nodes.insert(j++, nop);
-                _cfg->_bbs.map(nop->_idx, b);
+                _cfg->map_node_to_block(nop, b);
                 last_inst++;
                 nop->emit(*cb, _regalloc);
                 cb->flush_bundle(true);
@@ -1549,7 +1547,7 @@
       if( padding > 0 ) {
         MachNode *nop = new (this) MachNopNode(padding / nop_size);
         b->_nodes.insert( b->_nodes.size(), nop );
-        _cfg->_bbs.map( nop->_idx, b );
+        _cfg->map_node_to_block(nop, b);
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
       }
@@ -1737,7 +1735,6 @@
 Scheduling::Scheduling(Arena *arena, Compile &compile)
   : _arena(arena),
     _cfg(compile.cfg()),
-    _bbs(compile.cfg()->_bbs),
     _regalloc(compile.regalloc()),
     _reg_node(arena),
     _bundle_instr_count(0),
@@ -2085,8 +2082,9 @@
     if( def->is_Proj() )        // If this is a machine projection, then
       def = def->in(0);         // propagate usage thru to the base instruction
 
-    if( _bbs[def->_idx] != bb ) // Ignore if not block-local
+    if (_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
       continue;
+    }
 
     // Compute the latency
     uint l = _bundle_cycle_number + n->latency(i);
@@ -2358,9 +2356,10 @@
       Node *inp = n->in(k);
       if (!inp) continue;
       assert(inp != n, "no cycles allowed" );
-      if( _bbs[inp->_idx] == bb ) { // Block-local use?
-        if( inp->is_Proj() )    // Skip through Proj's
+      if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
+        if (inp->is_Proj()) { // Skip through Proj's
           inp = inp->in(0);
+        }
         ++_uses[inp->_idx];     // Count 1 block-local use
       }
     }
@@ -2643,7 +2642,7 @@
     return;
 
   Node *pinch = _reg_node[def_reg]; // Get pinch point
-  if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
+  if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
       is_def ) {    // Check for a true def (not a kill)
     _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
     return;
@@ -2669,7 +2668,7 @@
       _cfg->C->record_method_not_compilable("too many D-U pinch points");
       return;
     }
-    _bbs.map(pinch->_idx,b);      // Pretend it's valid in this block (lazy init)
+    _cfg->map_node_to_block(pinch, b);      // Pretend it's valid in this block (lazy init)
     _reg_node.map(def_reg,pinch); // Record pinch-point
     //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
     if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
@@ -2713,9 +2712,9 @@
     return;
   Node *pinch = _reg_node[use_reg]; // Get pinch point
   // Check for no later def_reg/kill in block
-  if( pinch && _bbs[pinch->_idx] == b &&
+  if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
       // Use has to be block-local as well
-      _bbs[use->_idx] == b ) {
+      _cfg->get_block_for_node(use) == b) {
     if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
@@ -2895,7 +2894,7 @@
     int trace_cnt = 0;
     for (uint k = 0; k < _reg_node.Size(); k++) {
       Node* pinch = _reg_node[k];
-      if (pinch != NULL && pinch->Opcode() == Op_Node &&
+      if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
           // no precedence input edges
           (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
         cleanup_pinch(pinch);
--- a/hotspot/src/share/vm/opto/output.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/output.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -96,9 +96,6 @@
   // List of nodes currently available for choosing for scheduling
   Node_List _available;
 
-  // Mapping from node (index) to basic block
-  Block_Array& _bbs;
-
   // For each instruction beginning a bundle, the number of following
   // nodes to be bundled with it.
   Bundle *_node_bundling_base;
--- a/hotspot/src/share/vm/opto/postaloc.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/postaloc.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -78,11 +78,13 @@
 // Helper function for yank_if_dead
 int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
   int blk_adjust=0;
-  Block *oldb = _cfg._bbs[old->_idx];
+  Block *oldb = _cfg.get_block_for_node(old);
   oldb->find_remove(old);
   // Count 1 if deleting an instruction from the current block
-  if( oldb == current_block ) blk_adjust++;
-  _cfg._bbs.map(old->_idx,NULL);
+  if (oldb == current_block) {
+    blk_adjust++;
+  }
+  _cfg.unmap_node_from_block(old);
   OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
   if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
     value->map(old_reg,NULL);  // Yank from value/regnd maps
@@ -433,7 +435,7 @@
     bool missing_some_inputs = false;
     Block *freed = NULL;
     for( j = 1; j < b->num_preds(); j++ ) {
-      Block *pb = _cfg._bbs[b->pred(j)->_idx];
+      Block *pb = _cfg.get_block_for_node(b->pred(j));
       // Remove copies along phi edges
       for( uint k=1; k<phi_dex; k++ )
         elide_copy( b->_nodes[k], j, b, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false );
@@ -478,7 +480,7 @@
     } else {
       if( !freed ) {            // Didn't get a freebie prior block
         // Must clone some data
-        freed = _cfg._bbs[b->pred(1)->_idx];
+        freed = _cfg.get_block_for_node(b->pred(1));
         Node_List &f_value = *blk2value[freed->_pre_order];
         Node_List &f_regnd = *blk2regnd[freed->_pre_order];
         for( uint k = 0; k < (uint)_max_reg; k++ ) {
@@ -488,7 +490,7 @@
       }
       // Merge all inputs together, setting to NULL any conflicts.
       for( j = 1; j < b->num_preds(); j++ ) {
-        Block *pb = _cfg._bbs[b->pred(j)->_idx];
+        Block *pb = _cfg.get_block_for_node(b->pred(j));
         if( pb == freed ) continue; // Did self already via freelist
         Node_List &p_regnd = *blk2regnd[pb->_pre_order];
         for( uint k = 0; k < (uint)_max_reg; k++ ) {
@@ -515,8 +517,9 @@
           u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
       }
       if( u != NodeSentinel ) {    // Junk Phi.  Remove
-        b->_nodes.remove(j--); phi_dex--;
-        _cfg._bbs.map(phi->_idx,NULL);
+        b->_nodes.remove(j--);
+        phi_dex--;
+        _cfg.unmap_node_from_block(phi);
         phi->replace_by(u);
         phi->disconnect_inputs(NULL, C);
         continue;
--- a/hotspot/src/share/vm/opto/reg_split.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/opto/reg_split.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -132,7 +132,7 @@
   }
 
   b->_nodes.insert(i,spill);    // Insert node in block
-  _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect
+  _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
   // Adjust the point where we go hi-pressure
   if( i <= b->_ihrp_index ) b->_ihrp_index++;
   if( i <= b->_fhrp_index ) b->_fhrp_index++;
@@ -219,7 +219,7 @@
         use->set_req(useidx, def);
       } else {
         // Block and index where the use occurs.
-        Block *b = _cfg._bbs[use->_idx];
+        Block *b = _cfg.get_block_for_node(use);
         // Put the clone just prior to use
         int bindex = b->find_node(use);
         // DEF is UP, so must copy it DOWN and hook in USE
@@ -270,7 +270,7 @@
   int bindex;
   // Phi input spill-copys belong at the end of the prior block
   if( use->is_Phi() ) {
-    b = _cfg._bbs[b->pred(useidx)->_idx];
+    b = _cfg.get_block_for_node(b->pred(useidx));
     bindex = b->end_idx();
   } else {
     // Put the clone just prior to use
@@ -335,7 +335,7 @@
         continue;
       }
 
-      Block *b_def = _cfg._bbs[def->_idx];
+      Block *b_def = _cfg.get_block_for_node(def);
       int idx_def = b_def->find_node(def);
       Node *in_spill = get_spillcopy_wide( in, def, i );
       if( !in_spill ) return 0; // Bailed out
@@ -589,7 +589,7 @@
         UPblock[slidx] = true;
         // Record following instruction in case 'n' rematerializes and
         // kills flags
-        Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
+        Block *pred1 = _cfg.get_block_for_node(b->pred(1));
         continue;
       }
 
@@ -601,7 +601,7 @@
       // Grab predecessor block header
       n1 = b->pred(1);
       // Grab the appropriate reaching def info for inpidx
-      pred = _cfg._bbs[n1->_idx];
+      pred = _cfg.get_block_for_node(n1);
       pidx = pred->_pre_order;
       Node **Ltmp = Reaches[pidx];
       bool  *Utmp = UP[pidx];
@@ -616,7 +616,7 @@
         // Grab predecessor block headers
         n2 = b->pred(inpidx);
         // Grab the appropriate reaching def info for inpidx
-        pred = _cfg._bbs[n2->_idx];
+        pred = _cfg.get_block_for_node(n2);
         pidx = pred->_pre_order;
         Ltmp = Reaches[pidx];
         Utmp = UP[pidx];
@@ -701,7 +701,7 @@
         // Grab predecessor block header
         n1 = b->pred(1);
         // Grab the appropriate reaching def info for k
-        pred = _cfg._bbs[n1->_idx];
+        pred = _cfg.get_block_for_node(n1);
         pidx = pred->_pre_order;
         Node **Ltmp = Reaches[pidx];
         bool  *Utmp = UP[pidx];
@@ -919,7 +919,7 @@
                 return 0;
               }
               _lrg_map.extend(def->_idx, 0);
-              _cfg._bbs.map(def->_idx,b);
+              _cfg.map_node_to_block(def, b);
               n->set_req(inpidx, def);
               continue;
             }
@@ -1291,7 +1291,7 @@
   for( insidx = 0; insidx < phis->size(); insidx++ ) {
     Node *phi = phis->at(insidx);
     assert(phi->is_Phi(),"This list must only contain Phi Nodes");
-    Block *b = _cfg._bbs[phi->_idx];
+    Block *b = _cfg.get_block_for_node(phi);
     // Grab the live range number
     uint lidx = _lrg_map.find_id(phi);
     uint slidx = lrg2reach[lidx];
@@ -1315,7 +1315,7 @@
     // DEF has the wrong UP/DOWN value.
     for( uint i = 1; i < b->num_preds(); i++ ) {
       // Get predecessor block pre-order number
-      Block *pred = _cfg._bbs[b->pred(i)->_idx];
+      Block *pred = _cfg.get_block_for_node(b->pred(i));
       pidx = pred->_pre_order;
       // Grab reaching def
       Node *def = Reaches[pidx][slidx];
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -3217,15 +3217,6 @@
   JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
   jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop);
 
-  if (the_class_oop == Universe::reflect_invoke_cache()->klass()) {
-    // We are redefining java.lang.reflect.Method. Method.invoke() is
-    // cached and users of the cache care about each active version of
-    // the method so we have to track this previous version.
-    // Do this before methods get switched
-    Universe::reflect_invoke_cache()->add_previous_version(
-      the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum()));
-  }
-
   // Deoptimize all compiled code that depends on this class
   flush_dependent_code(the_class, THREAD);
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Aug 16 04:14:12 2013 -0700
@@ -1098,7 +1098,7 @@
                                                                                                                                      \
   c2_nonstatic_field(PhaseCFG,           _num_blocks,              uint)                                                             \
   c2_nonstatic_field(PhaseCFG,           _blocks,                  Block_List)                                                       \
-  c2_nonstatic_field(PhaseCFG,           _bbs,                     Block_Array)                                                      \
+  c2_nonstatic_field(PhaseCFG,           _node_to_block_mapping,   Block_Array)                                                      \
   c2_nonstatic_field(PhaseCFG,           _broot,                   Block*)                                                           \
                                                                                                                                      \
   c2_nonstatic_field(PhaseRegAlloc,      _node_regs,               OptoRegPair*)                                                     \
--- a/hotspot/src/share/vm/utilities/debug.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/utilities/debug.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -225,6 +225,22 @@
 
 void warning(const char* format, ...);
 
+#ifdef ASSERT
+// Compile-time asserts.
+template <bool> struct StaticAssert;
+template <> struct StaticAssert<true> {};
+
+// Only StaticAssert<true> is defined, so if cond evaluates to false we get
+// a compile time exception when trying to use StaticAssert<false>.
+#define STATIC_ASSERT(cond)                   \
+  do {                                        \
+    StaticAssert<(cond)> DUMMY_STATIC_ASSERT; \
+    (void)DUMMY_STATIC_ASSERT; /* ignore */   \
+  } while (false)
+#else
+#define STATIC_ASSERT(cond)
+#endif
+
 // out of shared space reporting
 enum SharedSpaceType {
   SharedPermGen,
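
A usage sketch for the new STATIC_ASSERT (illustrative only; note that in non-ASSERT builds the macro expands to nothing, so it checks only in debug builds):

    void static_assert_examples() {
      STATIC_ASSERT(sizeof(jint) == 4);    // OK: StaticAssert<true> is defined
      // STATIC_ASSERT(sizeof(jint) == 8); // would fail to compile, because
      //                                   // StaticAssert<false> has no definition
    }
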
--- a/hotspot/src/share/vm/utilities/exceptions.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/utilities/exceptions.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -306,6 +306,6 @@
 // which preserves pre-existing exceptions and does not allow new
 // exceptions.
 
-#define EXCEPTION_MARK                           Thread* THREAD; ExceptionMark __em(THREAD);
+#define EXCEPTION_MARK                           Thread* THREAD = NULL; ExceptionMark __em(THREAD);
 
 #endif // SHARE_VM_UTILITIES_EXCEPTIONS_HPP
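
The NULL initializer exists to silence uninitialized-variable warnings; it is presumably safe because ExceptionMark's constructor takes Thread*& and assigns the current thread before THREAD is used. A hedged sketch of that constructor (the exceptions.cpp definition is not part of this changeset, so the body is an assumption):

    ExceptionMark::ExceptionMark(Thread*& thread) {
      thread = Thread::current();   // overwrites the NULL from EXCEPTION_MARK
      _thread = thread;
    }
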
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri Aug 16 04:14:12 2013 -0700
@@ -410,6 +410,8 @@
   return align_size_down_(size, alignment);
 }
 
+#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
+
 // Align objects by rounding up their size, in HeapWord units.
 
 #define align_object_size_(size) align_size_up_(size, MinObjAlignment)
@@ -428,6 +430,10 @@
   return align_size_up(offset, HeapWordsPerLong);
 }
 
+inline void* align_pointer_up(const void* addr, size_t size) {
+  return (void*) align_size_up_((uintptr_t)addr, size);
+}
+
 // Clamp an address to be within a specific page
 // 1. If addr is on the page it is returned as is
 // 2. If addr is above the page_address the start of the *next* page will be returned
@@ -449,32 +455,6 @@
 // The expected size in bytes of a cache line, used to pad data structures.
 #define DEFAULT_CACHE_LINE_SIZE 64
 
-// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
-// expected cache line size (a power of two).  The first addend avoids sharing
-// when the start address is not a multiple of alignment; the second maintains
-// alignment of starting addresses that happen to be a multiple.
-#define PADDING_SIZE(type, alignment)                           \
-  ((alignment) + align_size_up_(sizeof(type), alignment))
-
-// Templates to create a subclass padded to avoid cache line sharing.  These are
-// effective only when applied to derived-most (leaf) classes.
-
-// When no args are passed to the base ctor.
-template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
-class Padded: public T {
-private:
-  char _pad_buf_[PADDING_SIZE(T, alignment)];
-};
-
-// When either 0 or 1 args may be passed to the base ctor.
-template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
-class Padded01: public T {
-public:
-  Padded01(): T() { }
-  Padded01(Arg1T arg1): T(arg1) { }
-private:
-  char _pad_buf_[PADDING_SIZE(T, alignment)];
-};
 
 //----------------------------------------------------------------------------------------------------
 // Utility macros for compilers
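
Worked examples for the two new alignment helpers, assuming align_size_up_ rounds its size argument up to the next multiple of a power-of-two alignment (the function name and values below are illustrative only):

    static void alignment_examples() {
      assert(is_size_aligned_(48, 16),  "48 is already a multiple of 16");
      assert(!is_size_aligned_(50, 16), "align_size_up_(50, 16) == 64");
      void* p = align_pointer_up((void*)0x1003, 8);
      assert(p == (void*)0x1008, "rounded up to the next 8-byte boundary");
    }
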
--- a/hotspot/test/compiler/whitebox/ClearMethodStateTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/ClearMethodStateTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -26,7 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build ClearMethodStateTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ClearMethodStateTest
+ * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* ClearMethodStateTest
  * @summary testing of WB::clearMethodState()
  * @author igor.ignatyev@oracle.com
  */
--- a/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -61,6 +61,9 @@
     /** Value of {@code -XX:TieredStopAtLevel} */
     protected static final int TIERED_STOP_AT_LEVEL
             = Integer.parseInt(getVMOption("TieredStopAtLevel", "0"));
+    /** Flag for verbose output, true if {@code -Dverbose} specified */
+    protected static final boolean IS_VERBOSE
+            = System.getProperty("verbose") != null;
 
     /**
      * Returns value of VM option.
@@ -268,7 +271,9 @@
             }
             result += tmp == null ? 0 : tmp;
         }
-        System.out.println("method was invoked " + count + " times");
+        if (IS_VERBOSE) {
+            System.out.println("method was invoked " + count + " times");
+        }
         return result;
     }
 }
--- a/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -26,7 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build DeoptimizeAllTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeAllTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* DeoptimizeAllTest
  * @summary testing of WB::deoptimizeAll()
  * @author igor.ignatyev@oracle.com
  */
--- a/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -26,7 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build DeoptimizeMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeMethodTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* DeoptimizeMethodTest
  * @summary testing of WB::deoptimizeMethod()
  * @author igor.ignatyev@oracle.com
  */
--- a/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -26,7 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build EnqueueMethodForCompilationTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI EnqueueMethodForCompilationTest
+ * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* EnqueueMethodForCompilationTest
  * @summary testing of WB::enqueueMethodForCompilation()
  * @author igor.ignatyev@oracle.com
  */
--- a/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -27,7 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build IsMethodCompilableTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI IsMethodCompilableTest
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* IsMethodCompilableTest
  * @summary testing of WB::isMethodCompilable()
  * @author igor.ignatyev@oracle.com
  */
--- a/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -27,7 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build MakeMethodNotCompilableTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI MakeMethodNotCompilableTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* MakeMethodNotCompilableTest
  * @summary testing of WB::makeMethodNotCompilable()
  * @author igor.ignatyev@oracle.com
  */
--- a/hotspot/test/compiler/whitebox/SetDontInlineMethodTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/SetDontInlineMethodTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -26,7 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build SetDontInlineMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetDontInlineMethodTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* SetDontInlineMethodTest
  * @summary testing of WB::testSetDontInlineMethod()
  * @author igor.ignatyev@oracle.com
  */
--- a/hotspot/test/compiler/whitebox/SetForceInlineMethodTest.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/compiler/whitebox/SetForceInlineMethodTest.java	Fri Aug 16 04:14:12 2013 -0700
@@ -26,7 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build SetForceInlineMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetForceInlineMethodTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* SetForceInlineMethodTest
  * @summary testing of WB::testSetForceInlineMethod()
  * @author igor.ignatyev@oracle.com
  */
--- a/hotspot/test/runtime/7107135/Test7107135.sh	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/runtime/7107135/Test7107135.sh	Fri Aug 16 04:14:12 2013 -0700
@@ -53,9 +53,6 @@
     fi
     ;;
   *)
-    NULL=NUL
-    PS=";"
-    FS="\\"
     echo "Test passed; only valid for Linux"
     exit 0;
     ;;
@@ -87,14 +84,16 @@
 
 echo
 echo Test changing of stack protection:
-echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rw
+echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rwx
 ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rwx
+JAVA_RETVAL=$?
 
-if [ "$?" == "0" ]
+if [ "$JAVA_RETVAL" == "0" ]
 then
   echo
   echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} TestMT test-rwx
   ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} TestMT test-rwx
+  JAVA_RETVAL=$?
 fi
 
-exit $?
+exit $JAVA_RETVAL
--- a/hotspot/test/runtime/RedefineObject/Agent.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/runtime/RedefineObject/Agent.java	Fri Aug 16 04:14:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
  */
 import java.security.*;
 import java.lang.instrument.*;
+import java.lang.reflect.*;
 
 public class Agent implements ClassFileTransformer {
     public synchronized byte[] transform(final ClassLoader classLoader,
@@ -29,22 +30,34 @@
                                          Class<?> classBeingRedefined,
                                          ProtectionDomain protectionDomain,
                                          byte[] classfileBuffer) {
-        //System.out.println("Transforming class " + className);
+        System.out.println("Transforming class " + className);
         return classfileBuffer;
     }
 
+    public static void redefine(String agentArgs, Instrumentation instrumentation, Class to_redefine) {
+
+        try {
+            instrumentation.retransformClasses(to_redefine);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+    }
+
     public static void premain(String agentArgs, Instrumentation instrumentation) {
-
         Agent transformer = new Agent();
-
         instrumentation.addTransformer(transformer, true);
 
-        Class c = Object.class;
-        try {
-            instrumentation.retransformClasses(c);
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
+        // Redefine java/lang/Object, java/lang/reflect/Method, and
+        // java/lang/ClassLoader.
+        Class object_class = Object.class;
+        redefine(agentArgs, instrumentation, object_class);
+
+        Class method_class = Method.class;
+        redefine(agentArgs, instrumentation, method_class);
+
+        Class loader_class = ClassLoader.class;
+        redefine(agentArgs, instrumentation, loader_class);
 
         instrumentation.removeTransformer(transformer);
     }
@@ -57,5 +70,14 @@
             System.gc();
             ba.clone();
         }
+        try {
+            // Use java/lang/reflect/Method.invoke to call stackWalk().
+            WalkThroughInvoke a = new WalkThroughInvoke();
+            Class aclass = WalkThroughInvoke.class;
+            Method m = aclass.getMethod("stackWalk");
+            m.invoke(a);
+        } catch (Exception x) {
+            x.printStackTrace();
+        }
     }
 }
--- a/hotspot/test/runtime/RedefineObject/TestRedefineObject.java	Thu Aug 15 09:25:33 2013 -0700
+++ b/hotspot/test/runtime/RedefineObject/TestRedefineObject.java	Fri Aug 16 04:14:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,14 +26,17 @@
 /*
  * Test to redefine java/lang/Object and verify that it doesn't crash on vtable
  * call on basic array type.
+ * Test to redefine java/lang/ClassLoader and java/lang/reflect/Method to make
+ * sure cached versions used afterward are the current version.
  *
  * @test
  * @bug 8005056
+ * @bug 8009728
  * @library /testlibrary
  * @build Agent
  * @run main ClassFileInstaller Agent
  * @run main TestRedefineObject
- * @run main/othervm -javaagent:agent.jar Agent
+ * @run main/othervm -javaagent:agent.jar -XX:TraceRedefineClasses=5 Agent
  */
 public class TestRedefineObject {
     public static void main(String[] args) throws Exception  {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/RedefineObject/WalkThroughInvoke.java	Fri Aug 16 04:14:12 2013 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.lang.reflect.*;
+
+public class WalkThroughInvoke {
+  public void stackWalk() {
+      try {
+          Class b = Object.class;
+          SecurityManager sm = new SecurityManager();
+          // Walks the stack with Method.invoke in the stack (which is the
+          // purpose of the test) before it gets an AccessControlException.
+          sm.checkMemberAccess(b, Member.DECLARED);
+      } catch (java.security.AccessControlException e) {
+          // Ignore the AccessControlException; it is expected as part
+          // of this test.
+      }
+  }
+};